Android音頻(10)——多App同時錄音實現


一、使用c++編寫錄音程序

1. PCM音頻數據是原始音頻數據,無法使用播放器播放,需要給它加上一個頭部,表明聲音有幾個通道,采樣率是多少等等。將
PCM音頻數據轉換為WAV格式,這樣其它播放器才能播放出來。


2. 錄音時要確定3個參數
(1)采樣率:一秒鍾對聲波采樣的次數。常用的采樣率有8000,11025,22050,32000,44100.
高版本的Android應該可以支持更高的采樣率。

(2)每個采樣值使用多少bit表示
目前Android系統上固定為16bit

(3)聲道數
Stereo:立體聲,每個采樣點記錄左右聲道的值
Mono: 單聲道

3. tinyplay工具只能播放雙聲道的音頻數據。

4.測試程序
(1)AudioRecordTest.cpp,用於錄制產生pcm數據

#include <utils/Log.h>
#include <media/AudioRecord.h>
#include <stdlib.h>

using namespace android;

 
//==============================================
//    Audio Record Defination
//==============================================

#ifdef LOG_TAG
#undef LOG_TAG
#endif

#define LOG_TAG "AudioRecordTest"
 
// Thread handle storage for a (not yet used here) detached recorder thread.
static pthread_t        g_AudioRecordThread;
static pthread_t *    g_AudioRecordThreadPtr = NULL;
 
// Set to true by a controlling thread to ask the record loop to exit.
volatile bool             g_bQuitAudioRecordThread = false;
// Sample-time counter, reset at the start of each recording.
volatile int                 g_iInSampleTime = 0;
// Notification period in frames: 1/10 s at the default 8000 Hz rate.
int                                 g_iNotificationPeriodInFrames = 8000/10; 
// g_iNotificationPeriodInFrames must be updated whenever the sample rate
// changes (AudioRecordThread recomputes it as sampleRateInHz/10).

static void *    AudioRecordThread(int sample_rate, int channels, void *fileName)
{
    /*
     * Record PCM audio from the microphone into a raw file.
     *
     * sample_rate: capture rate in Hz (e.g. 8000, 44100)
     * channels:    1 for mono, 2 for stereo
     * fileName:    output file path (void* so the function can also be used
     *              directly as a pthread entry point)
     *
     * Loops until g_bQuitAudioRecordThread is set true by another thread.
     * Always returns NULL.
     */
    void *                   inBuffer           = NULL;   // capture buffer
    audio_source_t           inputSource        = AUDIO_SOURCE_MIC;
    audio_format_t           audioFormat        = AUDIO_FORMAT_PCM_16_BIT;
    audio_channel_mask_t     channelConfig      = AUDIO_CHANNEL_IN_MONO;
    int                      bufferSizeInBytes  = 0;
    int                      sampleRateInHz     = sample_rate;
    android::AudioRecord *   pAudioRecord       = NULL;
    FILE *                   g_pAudioRecordFile = NULL;
    char *                   strAudioFile       = (char *)fileName;
    android::status_t        status;             // declared (not initialized) here so
                                                 // the early "goto exit" below does not
                                                 // jump over an initialization

    int    iNbChannels     = channels; // 1 channel for mono, 2 channels for stereo
    int    iBytesPerSample = 2;        // 16-bit PCM -> 2 bytes per sample
    int    frameSize       = 0;        // iNbChannels * iBytesPerSample
    size_t minFrameCount   = 0;        // queried from AudioRecord below
    int    iWriteDataCount = 0;        // total bytes written to the output file

    // pthread_t is an opaque type; cast for printing instead of using %d.
    ALOGD("%s  Thread ID  = %ld  \n", __FUNCTION__, (long)pthread_self());
    g_iInSampleTime = 0;

    g_pAudioRecordFile = fopen(strAudioFile, "wb+");
    if (NULL == g_pAudioRecordFile)
    {
        // Bug fix: the original ignored a failed fopen() and kept recording
        // while silently discarding every captured buffer.
        ALOGE("%s  open output file failed \n", __FUNCTION__);
        goto exit;
    }

    if (iNbChannels == 2) {
        channelConfig = AUDIO_CHANNEL_IN_STEREO;
    }
    printf("sample_rate = %d, channels = %d, iNbChannels = %d, channelConfig = 0x%x\n", sample_rate, channels, iNbChannels, channelConfig);

    frameSize = iNbChannels * iBytesPerSample;

    status = android::AudioRecord::getMinFrameCount(
        &minFrameCount, sampleRateInHz, audioFormat, channelConfig);
    if (status != android::NO_ERROR)
    {
        ALOGE("%s  AudioRecord.getMinFrameCount fail \n", __FUNCTION__);
        goto exit;
    }

    // minFrameCount is size_t; cast so the %d conversion is well-defined.
    ALOGE("sampleRateInHz = %d minFrameCount = %d iNbChannels = %d channelConfig = 0x%x frameSize = %d ",
        sampleRateInHz, (int)minFrameCount, iNbChannels, channelConfig, frameSize);

    bufferSizeInBytes = minFrameCount * frameSize;

    inBuffer = malloc(bufferSizeInBytes);
    if (inBuffer == NULL)
    {
        ALOGE("%s  alloc mem failed \n", __FUNCTION__);
        goto exit;
    }

    // Notification period: 1/10 second worth of frames at the chosen rate.
    g_iNotificationPeriodInFrames = sampleRateInHz / 10;

    pAudioRecord = new android::AudioRecord();
    if (NULL == pAudioRecord)
    {
        ALOGE(" create native AudioRecord failed! ");
        goto exit;
    }

    pAudioRecord->set(inputSource,
                      sampleRateInHz,
                      audioFormat,
                      channelConfig,
                      0,
                      NULL, //AudioRecordCallback,
                      NULL,
                      0,
                      true,
                      0);

    // Bug fix: these two log messages said "AudioTrack" in the original,
    // which is misleading in a recording program.
    if (pAudioRecord->initCheck() != android::NO_ERROR)
    {
        ALOGE("AudioRecord initCheck error!");
        goto exit;
    }

    if (pAudioRecord->start() != android::NO_ERROR)
    {
        ALOGE("AudioRecord start error!");
        goto exit;
    }

    while (!g_bQuitAudioRecordThread)
    {
        int readLen = pAudioRecord->read(inBuffer, bufferSizeInBytes);
        int writeResult = -1;

        if (readLen > 0)
        {
            iWriteDataCount += readLen;
            if (NULL != g_pAudioRecordFile)
            {
                writeResult = fwrite(inBuffer, 1, readLen, g_pAudioRecordFile);
                if (writeResult < readLen)
                {
                    ALOGE("Write Audio Record Stream error");
                }
            }
        }
        else
        {
            // Bug fix: readLen may be a negative error code, not only 0;
            // report the actual value instead of a hard-coded "0".
            ALOGE("pAudioRecord->read failed, readLen = %d", readLen);
        }
    }

exit:
    if (NULL != g_pAudioRecordFile)
    {
        fflush(g_pAudioRecordFile);
        fclose(g_pAudioRecordFile);
        g_pAudioRecordFile = NULL;
    }

    if (pAudioRecord)
    {
        pAudioRecord->stop();
        // NOTE(review): AudioRecord derives from RefBase on most platform
        // versions, so it should be held in android::sp<> rather than freed
        // with a raw delete -- confirm against the target Android version.
    }

    if (inBuffer)
    {
        free(inBuffer);
        inBuffer = NULL;
    }

    ALOGD("%s  Thread ID  = %ld  quit\n", __FUNCTION__, (long)pthread_self());
    return NULL;
}

int main(int argc, char **argv)
{
    // Expect exactly three user arguments: sample rate, channel count, output path.
    if (argc != 4)
    {
        printf("Usage:\n");
        printf("%s <sample_rate> <channels> <out_file>\n", argv[0]);
        return -1;
    }

    // Base 0 lets the user write decimal, hex (0x...) or octal values.
    const long sampleRate   = strtol(argv[1], NULL, 0);
    const long channelCount = strtol(argv[2], NULL, 0);
    AudioRecordThread(sampleRate, channelCount, argv[3]);
    return 0;
}
View Code

(2)pcm2wav.cpp,用於將pcm轉換為wav格式

#include <stdio.h>
#include <string.h>
#include <stdlib.h>

/* https://blog.csdn.net/u010011236/article/details/53026127 */

/**
 * Convert PCM16LE raw data to WAVE format
 * @param pcmpath       Input PCM file.
 * @param channels      Channel number of PCM file.
 * @param sample_rate   Sample rate of PCM file.
 * @param wavepath      Output WAVE file.
 */
int simplest_pcm16le_to_wave(const char *pcmpath, int sample_rate, int channels, const char *wavepath)
{
    typedef struct WAVE_HEADER{
        char    fccID[4];       //ÄÚÈÝΪ""RIFF
        unsigned long dwSize;   //×îºóÌîд£¬WAVE¸ñʽÒôƵµÄ´óС
        char    fccType[4];     //ÄÚÈÝΪ"WAVE"
    }WAVE_HEADER;

    typedef struct WAVE_FMT{
        char    fccID[4];          //ÄÚÈÝΪ"fmt "
        unsigned long  dwSize;     //ÄÚÈÝΪWAVE_FMTÕ¼µÄ×Ö½ÚÊý£¬Îª16
        unsigned short wFormatTag; //Èç¹ûΪPCM£¬¸ÄֵΪ 1
        unsigned short wChannels;  //ͨµÀÊý£¬µ¥Í¨µÀ=1£¬Ë«Í¨µÀ=2
        unsigned long  dwSamplesPerSec;//²ÉÓÃÆµÂÊ
        unsigned long  dwAvgBytesPerSec;/* ==dwSamplesPerSec*wChannels*uiBitsPerSample/8 */
        unsigned short wBlockAlign;//==wChannels*uiBitsPerSample/8
        unsigned short uiBitsPerSample;//ÿ¸ö²ÉÑùµãµÄbitÊý£¬8bits=8, 16bits=16
    }WAVE_FMT;

    typedef struct WAVE_DATA{
        char    fccID[4];       //ÄÚÈÝΪ"data"
        unsigned long dwSize;   //==NumSamples*wChannels*uiBitsPerSample/8
    }WAVE_DATA;

#if 0
    if(channels==2 || sample_rate==0)
    {
        channels = 2;
        sample_rate = 44100;
    }
#endif    
    int bits = 16;

    WAVE_HEADER pcmHEADER;
    WAVE_FMT    pcmFMT;
    WAVE_DATA   pcmDATA;

    unsigned short m_pcmData;
    FILE *fp, *fpout;

    fp = fopen(pcmpath, "rb+");
    if(fp==NULL)
    {
        printf("Open pcm file error.\n");
        return -1;
    }
    fpout = fopen(wavepath, "wb+");
    if(fpout==NULL)
    {
        printf("Create wav file error.\n");
        return -1;
    }

    /* WAVE_HEADER */
    memcpy(pcmHEADER.fccID, "RIFF", strlen("RIFF"));
    memcpy(pcmHEADER.fccType, "WAVE", strlen("WAVE"));
    fseek(fpout, sizeof(WAVE_HEADER), 1);   //1=SEEK_CUR
    /* WAVE_FMT */
    memcpy(pcmFMT.fccID, "fmt ", strlen("fmt "));
    pcmFMT.dwSize = 16;
    pcmFMT.wFormatTag = 1;
    pcmFMT.wChannels = channels;
    pcmFMT.dwSamplesPerSec = sample_rate;
    pcmFMT.uiBitsPerSample = bits;
    /* ==dwSamplesPerSec*wChannels*uiBitsPerSample/8 */
    pcmFMT.dwAvgBytesPerSec = pcmFMT.dwSamplesPerSec*pcmFMT.wChannels*pcmFMT.uiBitsPerSample/8;
    /* ==wChannels*uiBitsPerSample/8 */
    pcmFMT.wBlockAlign = pcmFMT.wChannels*pcmFMT.uiBitsPerSample/8;


    fwrite(&pcmFMT, sizeof(WAVE_FMT), 1, fpout);

    /* WAVE_DATA */
    memcpy(pcmDATA.fccID, "data", strlen("data"));
    pcmDATA.dwSize = 0;
    fseek(fpout, sizeof(WAVE_DATA), SEEK_CUR);

    fread(&m_pcmData, sizeof(unsigned short), 1, fp);
    while(!feof(fp))
    {
        pcmDATA.dwSize += 2;
        fwrite(&m_pcmData, sizeof(unsigned short), 1, fpout);
        fread(&m_pcmData, sizeof(unsigned short), 1, fp);
    }

    /*pcmHEADER.dwSize = 44 + pcmDATA.dwSize;*/
    //ÐÞ¸Äʱ¼ä£º2018Äê1ÔÂ5ÈÕ
    pcmHEADER.dwSize = 36 + pcmDATA.dwSize;

    rewind(fpout);
    fwrite(&pcmHEADER, sizeof(WAVE_HEADER), 1, fpout);
    fseek(fpout, sizeof(WAVE_FMT), SEEK_CUR);
    fwrite(&pcmDATA, sizeof(WAVE_DATA), 1, fpout);

    fclose(fp);
    fclose(fpout);

    return 0;
}

int main(int argc, char **argv)
{
    // Command line: <input pcm> <sample_rate> <channels> <output wav>.
    if (argc != 5)
    {
        printf("Usage:\n");
        printf("%s <input pcm file> <sample_rate> <channels>  <output wav file>\n", argv[0]);
        return -1;
    }

    // Base 0 accepts decimal, hex (0x...) or octal input.
    const long rate      = strtol(argv[2], NULL, 0);
    const long nChannels = strtol(argv[3], NULL, 0);
    simplest_pcm16le_to_wave(argv[1], rate, nChannels, argv[4]);

    return 0;
}
View Code

(3)Android.mk

# Build rules for the two native audio test tools.
#
#   audio_record_test : records mic input to a raw PCM file
#                       (AudioRecordTest.cpp); links libmedia for the
#                       native AudioRecord API.
#   pcm2wav           : wraps a raw PCM file in a WAV header (pcm2wav.cpp).
LOCAL_PATH:= $(call my-dir)
include $(CLEAR_VARS)

LOCAL_SRC_FILES:= \
    AudioRecordTest.cpp

LOCAL_SHARED_LIBRARIES := \
    libcutils \
    libutils \
    libmedia

LOCAL_MODULE:= audio_record_test

LOCAL_MODULE_TAGS := tests

include $(BUILD_EXECUTABLE)

# Second module: the PCM-to-WAV converter.
include $(CLEAR_VARS)

LOCAL_SRC_FILES:= \
    pcm2wav.cpp

# NOTE(review): pcm2wav.cpp only uses libc, so these shared libraries are
# presumably unnecessary for this module -- confirm before trimming.
LOCAL_SHARED_LIBRARIES := \
    libcutils \
    libutils \
    libmedia

LOCAL_MODULE:= pcm2wav

LOCAL_MODULE_TAGS := tests

include $(BUILD_EXECUTABLE)
View Code

然后使用tinyplay播放產生的wav文件。

錄音程序參考:
Android Native C++ 層中使用AudioRecord錄制PCM音頻: https://blog.csdn.net/romantic_energy/article/details/50521970

pcm轉wav參考:
PCM、WAV格式介紹及用C語言實現PCM轉WAV: https://blog.csdn.net/u010011236/article/details/53026127

5. 耳機只有一邊播放有聲音的原因

./AudioRecordTest 44100 2 my.pcm
./pcm2wav my.pcm 44100 2 my.wav
tinyplay my.wav
只有1個耳朵能聽到聲音

./AudioRecordTest 44100 1 my.pcm
./pcm2wav my.pcm 44100 1 my.wav
tinyplay 不能播放單聲道聲音, 用其他播放器來播放my.wav,2個耳朵都聽到聲音

為何錄音時用雙聲通,播放時只有1個耳朵有聲音?
反而錄音時用單聲通,播放時2個耳朵都有聲音?

答案:
a. 硬件上、驅動上是雙聲道的; 但是我們只接了一個MIC,所以驅動程序錄音時得到的雙聲道數據中,其中一個聲道數據恆為0
b. AudioRecordTest錄音時如果指定了雙聲道,那么得到的PCM數據里其中一個聲道恆為0,它播放時就會導致只有一個耳朵有聲音
c. AudioRecordTest錄音時如果指定了單聲道,那么得到的PCM數據只含有一個聲道數據,它是硬件左、右聲道的混合,這個混合
是AudioFlinger系統實現的.在播放時單聲道數據時, AudioFlinger系統會把單聲道數據既發給硬件Left DAC(左聲道)、也發給硬
件Right DAC(右聲道),所以2個耳朵都可以聽到聲音

 

二、錄音框架及代碼流程

1. playbackThread 就是MixerThread,多個App對應着一個線程。

2. 原生的Android錄音流程
根據App傳入的聲音來源找到對應的device
找到profile(audio_policy.conf產生的)
根據profile找到module,即對應一個聲卡,然后加載對應聲卡的HAL文件
調用HAL文件中的openInput()來打開一個輸入通道。


3. 錄音時只要App執行了set(),就會創建一個RecordThread(),多個App可能導致並發訪問聲卡,導致競爭訪
問聲卡數據的問題。

4. 錄音框架及代碼流程
a. APP創建、設置AudioRecord, 指定了聲音來源: inputSource, 比如: AUDIO_SOURCE_MIC,還指定了采樣率、通道數、格式等參數
b. AudioPolicyManager根據inputSource等參數確定錄音設備: device
c. AudioFlinger創建一個RecordThread, 以后該線程將從上述device讀取聲音數據
d. 在RecordThread內部為APP的AudioRecord創建一個對應的RecordTrack,APP的AudioRecord 與 RecordThread內部的RecordTrack 通過共享內存傳遞數據
e. RecordThread從HAL中得到數據, 再通過內部的RecordTrack把數據傳給APP的AudioRecord

注意:
在原生代碼中,APP的一個AudioRecord會導致創建一個RecordThread,在一個device上有可能存在多個RecordThread,
任意時刻只能有一個RecordThread在運行,所以只能有一個APP在錄音,不能多個APP同時錄音


三、修改代碼支持多APP同時錄音

修改AudioPolicyManager.cpp,補丁如下:

Subject: [PATCH] v2, support Multi AudioRecord at same time

---
 AudioPolicyManager.cpp | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/AudioPolicyManager.cpp b/AudioPolicyManager.cpp
index 536987a..6c87508 100644
--- a/AudioPolicyManager.cpp
+++ b/AudioPolicyManager.cpp
@@ -1356,6 +1356,17 @@ audio_io_handle_t AudioPolicyManager::getInput(audio_source_t inputSource,
     config.channel_mask = channelMask;
     config.format = format;
 
+    /* check whether an existing AudioInputDescriptor already uses the same profile */
+    for (size_t input_index = 0; input_index < mInputs.size(); input_index++) {
+        sp<AudioInputDescriptor> desc;
+        desc = mInputs.valueAt(input_index);
+        if (desc->mProfile == profile) {
+            desc->mOpenRefCount++;        // increase the reference count
+            desc->mSessions.add(session); // session
+            return desc->mIoHandle;
+        }
+    }    
+
     status_t status = mpClientInterface->openInput(profile->mModule->mHandle,
                                                    &input,
                                                    &config,
-- 
1.9.1
View Code

 


免責聲明!

本站轉載的文章為個人學習借鑒使用,本站對版權不負任何法律責任。如果侵犯了您的隱私權益,請聯系本站郵箱yoyou2525@163.com刪除。



 
粵ICP備18138465號   © 2018-2025 CODEPRJ.COM