1.如何追蹤問題
我這里遇到的是在通話中開免提,對方無法聽到我的聲音
(1).在ADT的tool目錄下找到hierarchyviewer,用這個工具找到我們的界面上的圖標,得到id(
audioButton
),
然后就可以去相應的APK里面去找那個id了
(2).跟蹤代碼,根據流程電話界面開免提的過程,去找問題
2.分析APK和framework中的調用流程,這里的APK是通話界面的APK
CallButtonFragment.java (packages\apps\incallui\src\com\android\incallui)
onCreateView
mAudioButton = (CompoundButton) parent.findViewById(R.id.audioButton); //找到調用的地方
當按鍵按下時的操作
onClick(View view)
case R.id.audioButton:
onAudioButtonClicked();
getPresenter().toggleSpeakerphone(); //如果不是藍牙狀態
toggleSpeakerphone
//搜索到這個函數在CallButtonPresenter.java (packages\apps\incallui\src\com\android\incallui)
toggleSpeakerphone
int newMode = AudioState.ROUTE_SPEAKER;
setAudioMode(newMode);
TelecomAdapter.getInstance().setAudioRoute(mode); //得到TelecomAdapter類,並調用它的setAudioRoute
// TelecomAdapter.java (packages\apps\incallui\src\com\android\incallui)
mPhone.setAudioRoute(route);
//調用Phone.java (frameworks\base\telecomm\java\android\telecom)的函數
setAudioRoute
mInCallAdapter.setAudioRoute(route);
//調用InCallAdapter.java (frameworks\base\telecomm\java\android\telecom)
mAdapter.setAudioRoute(route);
//調用InCallAdapter.java (packages\services\telecomm\src\com\android\server\telecom)
mHandler.obtainMessage(MSG_SET_AUDIO_ROUTE, route, 0).sendToTarget();
case MSG_SET_AUDIO_ROUTE: //發送消息在這里處理
mCallsManager.setAudioRoute(msg.arg1);
//CallsManager.java (packages\services\telecomm\src\com\android\server\telecom)
mCallAudioManager.setAudioRoute(route);
//CallAudioManager.java (packages\services\telecomm\src\com\android\server\telecom)
setSystemAudioState(mAudioState.isMuted(), newRoute,mAudioState.getSupportedRouteMask());
setSystemAudioState(false /* force */, isMuted, route, supportedRouteMask);
接上面
setSystemAudioState(false /* force */, isMuted, route, supportedRouteMask);
turnOnSpeaker(true);
mAudioManager.setSpeakerphoneOn(on);
//AudioManager.java (frameworks\base\media\java\android\media)
service.setSpeakerphoneOn(on);
IAudioService service = getService(); //得到service,通過binder
if (sService != null) {
    return sService;
}
IBinder b = ServiceManager.getService(Context.AUDIO_SERVICE); //得到binder服務的引用對象
sService = IAudioService.Stub.asInterface(b); //把引用對象封裝為代理對象
return sService;
service.setSpeakerphoneOn(on); //調用service的函數
通過binder調用AudioService.java (frameworks\base\media\java\android\media)的函數
setSpeakerphoneOn
mForcedUseForComm = AudioSystem.FORCE_SPEAKER;
sendMsg(mAudioHandler, MSG_SET_FORCE_USE, SENDMSG_QUEUE, AudioSystem.FOR_COMMUNICATION, mForcedUseForComm, null, 0); //發送消息MSG_SET_FORCE_USE
case MSG_SET_FORCE_USE:
case MSG_SET_FORCE_BT_A2DP_USE:
setForceUse(msg.arg1, msg.arg2); //調用這個函數
AudioSystem.setForceUse(usage, config); //調用這里,這里就開始調用到jni
AudioSystem.java (frameworks\base\media\java\android\media)
public static native int setForceUse(int usage, int config); //這個在jni中
3.中間層調用流程
android_media_AudioSystem.cpp (frameworks\base\core\jni)
{"setForceUse", "(II)I", (void *)android_media_AudioSystem_setForceUse},
//這里執行AudioSystem::setForceUse函數
check_AudioSystem_Command(AudioSystem::setForceUse(static_cast <audio_policy_force_use_t>(usage), static_cast <audio_policy_forced_cfg_t>(config)));
sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service(); //得到policy_service
sp<IServiceManager> sm = defaultServiceManager();
binder = sm->getService(String16("media.audio_policy")); //這兩句得到media.audio_policy的引用對象
gAudioPolicyServiceClient = new AudioPolicyServiceClient(); //如果沒有AudioPolicyServiceClient對象,就new一個
//調用這個IBinder的linkToDeath函數進行注冊。可以注冊一個IBinder.DeathRecipient類型的對象。其中IBinder.DeathRecipient是IBinder類中定義的一個嵌入類
//當這個IBinder所對應的Service進程被異常的退出時,比如被kill掉,這時系統會調用這個IBinder之前通過linkToDeath注冊的DeathRecipient類對象的binderDied函數。
//一般實現中,Bp端會注冊linkToDeath,目的是為了監聽綁定的Service的異常退出,一般的binderDied函數的實現是用來釋放一些相關的資源。
binder->linkToDeath(gAudioPolicyServiceClient);
//新創建一個AudioPolicyService對象並返回,且在創建BpAudioPolicyService時把binder做為其參數,結果是把binder對象賦值給其基類BpRefBase中的mRemote來保存。
//
展開后最終是生成調用new BpAudioPolicyService(new BpBinder(handle)),這里的handle是一個句柄;這樣我們最終得到了AudioPolicyService的代理BpAudioPolicyService,通過它就可以和AudioPolicyService的本地接口BnAudioPolicyService通訊了。
gAudioPolicyService = interface_cast<IAudioPolicyService>(binder);
apc = gAudioPolicyServiceClient;
ap = gAudioPolicyService;
ap->registerClient(apc); //注冊gAudioPolicyServiceClient
aps->setForceUse(usage, config); //下面單獨講解
AudioPolicyManager::setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config)
AudioPolicyManager.cpp (frameworks\av\services\audiopolicy)
setForceUse
checkA2dpSuspend(); //A2DP全名是Advanced Audio Distribution Profile 藍牙音頻傳輸模型協定
checkOutputForAllStrategies //校驗輸出策略
checkOutputForStrategy(STRATEGY_SONIFICATION) //這里只分析STRATEGY_SONIFICATION,其他情況基本一樣
audio_devices_t oldDevice = getDeviceForStrategy(strategy, true /*fromCache*/); //得到原來的策略
return mDeviceForStrategy[strategy]; //直接返回mDeviceForStrategy[STRATEGY_SONIFICATION];
audio_devices_t newDevice = getDeviceForStrategy(strategy, false /*fromCache*/); //getDeviceForStrategy.得到AUDIO_DEVICE_OUT_SPEAKER
audio_devices_t availableOutputDeviceTypes = mAvailableOutputDevices.types(); //得到可用的output設備
case STRATEGY_SONIFICATION:
if (isInCall()) { //打電話中
device = getDeviceForStrategy(STRATEGY_PHONE, false /*fromCache*/); //應該返回device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER;
case STRATEGY_PHONE:
getDeviceAndMixForInputSource(AUDIO_SOURCE_VOICE_COMMUNICATION); //得到輸入源和混音器,這里是mic
getDeviceForInputSource(inputSource); //得到輸入源
case AUDIO_SOURCE_VOICE_COMMUNICATION: // audio_source_t類型的一些判斷
case AUDIO_POLICY_FORCE_SPEAKER:
device = AUDIO_DEVICE_IN_BACK_MIC; //返回mic
updateDevicesAndOutputs();
mDeviceForStrategy[i] = getDeviceForStrategy((routing_strategy)i, false /*fromCache*/); //更新設備策略
mPreviousOutputs = mOutputs; //紀錄現在的輸出設備
if (mPhoneState == AUDIO_MODE_IN_CALL) { //如果在電話中
//可見他們都是PlaybackThread的子類,將該thread添加到mPlaybackThreads中,mPlaybackThreads是一個vector,它以id作為索引,將該線程保存起來,並返回給調用者,后續播放聲音時候通過傳進該id(也就是audio_io_handle_t),從該vector取就可以了。
audio_devices_t newDevice = getNewOutputDevice(mPrimaryOutput, true /*fromCache*/); //這里mPrimaryOutput就是audio_io_handle_t句柄,調用相應的播放線程,在audioflinger.cpp中一些常用的函數,播放聲音時候首先創建播放線程
sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output); //得到輸出描述符
device = getDeviceForStrategy(STRATEGY_PHONE, fromCache); //得到輸出設備
updateCallRouting(newDevice);
txDevice = getDeviceAndMixForInputSource(AUDIO_SOURCE_VOICE_COMMUNICATION); //得到輸入源和混音器,這里是mic
outputs = getOutputsForDevice(rxDevice, mOutputs);
setOutputDevice(mPrimaryOutput, rxDevice, true, delayMs);
//mpClientInterface->setParameters(mHardwareOutput, param.toString(), delayMs); //改變 route ,最終會調到 ALSAControl 中的 set 函數來設置 codec 的 switch 或者 widget 。
outputDesc->toAudioPortConfig(&patch.sources[0]); //5.0新引入的audio patch機制,單獨有一章分析
AudioPolicyManager::AudioOutputDescriptor::toAudioPortConfig
AudioPortConfig::toAudioPortConfig(dstConfig, srcConfig); //config port
deviceList.itemAt(i)->toAudioPortConfig(&patch.sinks[i]);
AudioPolicyManager::DeviceDescriptor::toAudioPortConfig //config port
mpClientInterface->createAudioPatch(&patch, &afPatchHandle, delayMs); //創建AudioPatch,單獨分析
applyStreamVolumes(output, device, delayMs);
audio_io_handle_t activeInput = getActiveInput(); //得到輸入
setInputDevice(activeInput, getNewInputDevice(activeInput)); //設置輸入
特別說明:
// 調用的其實是函數 AudioPolicyService::setParameters
// 會通過函數 AudioPolicyService::AudioCommandThread::parametersCommand 向 AudioCommandThread 的 command list
// 添加一個 command
// AudioPolicyService::AudioCommandThread::threadLoop 函數中會處理 command list 中的 command
// 對於 SET_PARAMETERS command ,最終調用了函數 AudioSystem::setParameters
// 調用了 AudioFlinger::setParameters 函數
// 調用了 AudioFlinger::ThreadBase::setParameters 函數添加成員到 mNewParameters
// 函數 AudioFlinger::MixerThread::checkForNewParameters_l 中會處理 mNewParameters 中的參數
// 函數 AudioFlinger::MixerThread::threadLoop 會調用函數 AudioFlinger::MixerThread::checkForNewParameters_l
mpClientInterface->setParameters(mHardwareOutput, param.toString(), delayMs); //改變 route ,最終會調到 ALSAControl 中的 set 函數來設置 codec 的 switch 或者 widget 。
// update stream volumes according to new device
// 設置 device 上各 stream 對應的音量
// 其中的實現是遍歷各 stream ,調用函數 checkAndSetVolume 將 AudioOutputDescriptor 保存的各 stream 的音量進行設置
// checkAndSetVolume 函數的實現在后面有看
applyStreamVolumes(output, device, delayMs);
PatchPanel.cpp (frameworks\av\services\audioflinger)
/* Connect a patch between several source and sink ports */
AudioFlinger::createAudioPatch
AudioFlinger::PatchPanel::createAudioPatch
delete removedPatch; //刪除patch
Patch *newPatch = new Patch(patch); //創建一個patch
audio_module_handle_t srcModule = patch->sources[0].ext.mix.hw_module; //得到audio_module_handle_t ,應該是hw層的
ssize_t index = audioflinger->mAudioHwDevs.indexOfKey(srcModule); //得到在mAudioHwDevs里面的index
AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index); //得到HW
sp<ThreadBase> thread = audioflinger->checkPlaybackThread_l(patch->sources[0].ext.mix.handle); //
//這個函數的意思是根據output值,從一堆線程中找到對應的那個線程:AudioFlinger.cpp (frameworks\av\services\audioflinger)
AudioFlinger::PlaybackThread *AudioFlinger::checkPlaybackThread_l(audio_io_handle_t output) const
status = thread->sendCreateAudioPatchConfigEvent(patch, &halHandle);
//Threads.cpp (frameworks\av\services\audioflinger)
status_t AudioFlinger::ThreadBase::sendCreateAudioPatchConfigEvent
sp<ConfigEvent> configEvent = (ConfigEvent *)new CreateAudioPatchConfigEvent(*patch, *handle);
mData = new CreateAudioPatchConfigEventData(patch, handle); //new
const struct audio_patch mPatch; //patch結構體
audio_patch_handle_t mHandle; //audio_patch_handle_t 結構體
status_t status = sendConfigEvent_l(configEvent); //
status_t AudioFlinger::ThreadBase::sendConfigEvent_l(sp<ConfigEvent>& event)
mConfigEvents.add(event); //加入到events中
mWaitWorkCV.signal(); //通過mWaitWorkCV.signal()喚醒void AudioFlinger::ThreadBase::processConfigEvents_l()
void AudioFlinger::ThreadBase::processConfigEvents_l()
event->mStatus = createAudioPatch_l(&data->mPatch, &data->mHandle);
status_t AudioFlinger::PlaybackThread::createAudioPatch_l(const struct audio_patch *patch,
audio_hw_device_t *hwDevice = mOutput->audioHwDev->hwDevice();
status = hwDevice->create_audio_patch(hwDevice, patch->num_sources, patch->sources, patch->num_sinks, patch->sinks, handle); //調用HW的模塊, 單獨分析
if (event->mCond.waitRelative(event->mLock, kConfigEventTimeoutNs) != NO_ERROR) //線程B和C的超時等待,B和C可以指定等待時間,當超過這個時間,條件卻還不滿足,則退出等待。
status = event->mStatus; //返回結果
CreateAudioPatchConfigEventData *data = (CreateAudioPatchConfigEventData *)configEvent->mData.get(); //得到CreateAudioPatchConfigEventData數據
*handle = data->mHandle; //返回audio_patch_handle_t
重要結構體:
class ConfigEvent: public RefBase {
public:
virtual ~ConfigEvent() {}
void dump(char *buffer, size_t size) { mData->dump(buffer, size); }
const int mType; // event type e.g. CFG_EVENT_IO
Mutex mLock; // mutex associated with mCond
Condition mCond; // condition for status return
status_t mStatus; // status communicated to sender
bool mWaitStatus; // true if sender is waiting for status
sp<ConfigEventData> mData; // event specific parameter data
protected:
ConfigEvent(int type) : mType(type), mStatus(NO_ERROR), mWaitStatus(false), mData(NULL) {}
};
四.調用HA層
Audio_hw_hal.cpp (vendor\mediatek\proprietary\platform\mt6735\hardware\audio\common\hardware\audio\aud_drv)
hwDevice->create_audio_patch(hwDevice, patch->num_sources, patch->sources, patch->num_sinks, patch->sinks, handle);
static int adev_create_audio_patch //在Audio_hw_hal.cpp
ladev->hwif->createAudioPatch(num_sources,sources,num_sinks,sinks,handle);
//AudioALSAHardware.cpp (vendor\mediatek\proprietary\platform\mt6735\hardware\audio\common\hardware\audio\v3\aud_drv)
AudioALSAHardware::createAudioPatch
if (sources[0].type == AUDIO_PORT_TYPE_MIX)
eOutDeviceList |= sinks[dDeviceIndex].ext.device.type; //得到輸出type
param.addInt(String8(AudioParameter::keyRouting), (int)eOutDeviceList); //把eOutDeviceList 放入keyRouting這個key
status = mStreamManager->setParameters(param.toString(), sources[0].ext.mix.handle);
//AudioALSAStreamManager.cpp (vendor\mediatek\proprietary\platform\mt6735\hardware\audio\aud_drv)
AudioALSAStreamManager::setParameters
index = mStreamOutVector.indexOfKey(IOport); //stream out的handle
AudioALSAStreamOut *pAudioALSAStreamOut = mStreamOutVector.valueAt(index); //得到AudioALSAStreamOut結構體
status = pAudioALSAStreamOut->setParameters(keyValuePairs);
//AudioALSAStreamOut.cpp (vendor\mediatek\proprietary\platform\mt6735\hardware\audio\aud_drv)
AudioALSAStreamOut::setParameters
status = mStreamManager->routingOutputDevice(mStreamAttributeSource.output_devices, static_cast<audio_devices_t>(value));
AudioALSAStreamManager::routingOutputDevice
// update the output device info for voice wakeup (even when "routing=0"), 更新信息
mAudioALSAVoiceWakeUpController->updateDeviceInfoForVoiceWakeUp();
bool bIsUseHeadsetMic = AudioMTKHeadSetMessager::getInstance()->isHeadsetPlugged(); //是否有耳機插入
setVoiceWakeUpEnable(true); //enable
mixer_ctl_set_enum_by_string(mixer_get_ctl_by_name(mMixer, "Audio_Vow_MIC_Type_Select"), "HeadsetMIC") //調用external/tinyalsa/,后面分析
// update if headset change
mHeadsetChange = CheckHeadsetChange(current_output_devices, output_devices); //看看headset是否變化
mAudioALSAVolumeController->setVoiceVolume(mAudioALSAVolumeController->getVoiceVolume(), mAudioMode , output_devices); //設置音量
AudioALSAStreamManager::setVoiceVolume
setAMPGain(ampgain, AMP_CONTROL_POINT,device); //設置
mSpeechPhoneCallController->routing( output_devices, mSpeechPhoneCallController->getInputDeviceForPhoneCall(output_devices));
//關閉原來的設備
// Set PMIC digital/analog part - uplink has pop, open first
mHardwareResourceManager->startInputDevice(input_device); //設置輸入device
// Set PMIC digital/analog part - DL need trim code.
mHardwareResourceManager->startOutputDevice(output_device, sample_rate); //設置輸出device,后面還有很多其他操作
//AudioALSAHardwareResourceManager.cpp (vendor\mediatek\proprietary\platform\mt6735\hardware\audio\aud_drv)
AudioALSAHardwareResourceManager::startOutputDevice
OpenSpeakerPath(SampleRate);
mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_HEADPHONE);
mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_EXT_SPEAKER);
mixer_ctl_set_enum_by_string(mixer_get_ctl_by_name(mMixer, cltname.string()), cltvalue.string()) //調用external/tinyalsa
五.調用tinyalsa
分析
mixer_ctl_set_enum_by_string
external/tinyalsa/mixer.c
mixer_ctl_set_enum_by_string
ret = ioctl(ctl->mixer->fd, SNDRV_CTL_IOCTL_ELEM_WRITE, &ev); //直接調用ioctl與與底層交互
