录音机和录像机的输入通路

/**
 * Read captured PCM data into |buffer|.
 *
 * If the stream is suspended, or this is an FM-tuner record while the echo
 * reference is in use (unsupported combination), silence is returned instead
 * and the call sleeps for roughly one buffer's worth of capture time.
 * Otherwise the capture handler is (re)opened as needed and real data is read.
 *
 * @param buffer destination buffer, at least |bytes| bytes.
 * @param bytes  number of bytes requested.
 * @return bytes read (== |bytes| on success/silence, 0 on open failure).
 */
ssize_t AudioALSAStreamIn::read(void *buffer, ssize_t bytes)
{
    ssize_t ret_size = bytes;
    int tryCount = 10;

    // Suspended, or FM record not allowed while echo reference is busy:
    // hand back silence with realistic timing.
    if (mSuspendCount > 0 || ((mStreamAttributeTarget.input_source == AUDIO_SOURCE_FM_TUNER) 
        && (mStreamManager->isEchoRefUsing() == true)))
    {
        // here to sleep a buffer size latency and return.
        memset(buffer, 0, bytes);
        // Bytes per sample for the stream's PCM format (defaults to 16-bit).
        size_t wordSize = 0;
        switch (mStreamAttributeTarget.audio_format)
        {
            case AUDIO_FORMAT_PCM_8_BIT:
            {
                wordSize = sizeof(int8_t);
                break;
            }
            case AUDIO_FORMAT_PCM_16_BIT:
            {
                wordSize = sizeof(int16_t);
                break;
            }
            case AUDIO_FORMAT_PCM_8_24_BIT:
            case AUDIO_FORMAT_PCM_32_BIT:
            {
                wordSize = sizeof(int32_t);
                break;
            }
            default:
            {
                wordSize = sizeof(int16_t);
                break;
            }
        }
        // Sleep for the time this buffer would have taken to capture:
        // bytes / (bytes per millisecond) * 1000 -> microseconds.
        // NOTE(review): divides by (sample_rate / 1000); a sample rate below
        // 1000 Hz would divide by zero -- presumably impossible here, confirm.
        int sleepus = ((bytes * 1000) / ((mStreamAttributeTarget.sample_rate / 1000) 
                      * mStreamAttributeTarget.num_channels * wordSize));
        usleep(sleepus);
        return bytes;
    }
    // Briefly yield (up to 10 * 300us) while another thread holds mLockCount.
    tryCount = 10;
    while(mLockCount && tryCount--) {
        usleep(300);
    }

    status_t status = NO_ERROR;
    // A routing change was requested: close the current capture path so it is
    // reopened below with the new device.
    if ((mUpdateOutputDevice == true) || (mUpdateInputDevice == true))
    {
        mUpdateOutputDevice = false;
        {
            AudioAutoTimeoutLock standbyLock(mStandbyLock);
            if (mStandby == false)
            {
                status = close();
            }
        }
        if (mUpdateInputDevice == true)
        {
            mUpdateInputDevice = false;
            mStreamAttributeTarget.input_device = mNewInputDevice;
        }
    }

    if (mStandby == true)
    {
        status = open();  // open the input device
    }

    if (status != NO_ERROR)
    {
        ret_size = 0;
    } else
    {
        ret_size = mCaptureHandler->read(buffer, bytes);  // read captured data
        WritePcmDumpData(buffer, bytes);  // optional PCM dump for debugging
    }
    return ret_size;
}

读取数据之前会创建handler对象,然后打开输入设备

/**
 * Leave standby: create the capture handler for the current stream
 * attributes and open it.
 *
 * @return NO_ERROR on success (or if already open); INVALID_OPERATION if no
 *         capture handler could be created; otherwise the handler's open()
 *         status.
 */
status_t AudioALSAStreamIn::open()
{
    status_t status = NO_ERROR;
    if (mStandby == true)
    {
        mCaptureHandler = mStreamManager->createCaptureHandler(&mStreamAttributeTarget);
        // createCaptureHandler() can return NULL (e.g. FM record while the
        // echo reference is in use); the original code dereferenced it
        // unconditionally and would crash here.
        if (mCaptureHandler == NULL)
        {
            return INVALID_OPERATION;
        }
        mStandby = false;
        status = mCaptureHandler->open();
        OpenPCMDump();
    }
    return status;
}

根据不同的场景(input_source和input_device)创建不同的handler

/**
 * Create the capture handler matching the stream's input_source /
 * input_device and the current call mode.
 *
 * Routing summary (first match wins):
 *   - FM tuner           -> AudioALSACaptureHandlerFMRadio (NULL if echo ref busy)
 *   - ANC                -> AudioALSACaptureHandlerANC
 *   - in phone call      -> AudioALSACaptureHandlerVoice
 *   - VoIP / AEC sources -> AudioALSACaptureHandlerBT or ...HandlerAEC
 *   - otherwise          -> AudioALSACaptureHandlerBT or ...HandlerNormal
 *
 * The handler is registered in mCaptureHandlerVector under a fresh identity.
 *
 * @param stream_attribute_target stream attributes; updated in place
 *        (audio_mode, output_devices, micmute, BesRecord flags).
 * @return new capture handler, or NULL when FM record is rejected.
 */
AudioALSACaptureHandlerBase *AudioALSAStreamManager::createCaptureHandler(
    stream_attribute_t *stream_attribute_target)
{
    // use primary stream out device
    const audio_devices_t current_output_devices = (mStreamOutVector.size() > 0)
                                                   ? mStreamOutVector[0]->getStreamAttribute()->output_devices
                                                   : AUDIO_DEVICE_NONE;


    // Init input stream attribute here
    stream_attribute_target->audio_mode = mAudioMode;
    stream_attribute_target->output_devices = current_output_devices;
    stream_attribute_target->micmute = mMicMute;

    // BesRecordInfo
    stream_attribute_target->BesRecord_Info.besrecord_enable = false; // default set besrecord off
    stream_attribute_target->BesRecord_Info.besrecord_bypass_dualmicprocess = mBypassDualMICProcessUL;

    // create
    AudioALSACaptureHandlerBase *pCaptureHandler = NULL;
    {
        if (stream_attribute_target->input_source == AUDIO_SOURCE_FM_TUNER)  // FM radio capture
        {
            if (isEchoRefUsing() == true)
            {
                ALOGD("%s(), not support FM record in VoIP mode, return NULL", __FUNCTION__);
                // NOTE(review): mLock is unlocked here but no matching lock()
                // is visible in this function -- presumably the caller (or an
                // earlier code path) holds it; confirm before touching.
                mLock.unlock();
                return NULL;
            }
            pCaptureHandler = new AudioALSACaptureHandlerFMRadio(stream_attribute_target);
        } else if (stream_attribute_target->input_source == AUDIO_SOURCE_ANC)
        {
            pCaptureHandler = new AudioALSACaptureHandlerANC(stream_attribute_target);
        } else if (isModeInPhoneCall() == true)  // in a voice (cellular) call
        {
            pCaptureHandler = new AudioALSACaptureHandlerVoice(stream_attribute_target);
        } else if ((isModeInVoipCall() == true) 
                 || (stream_attribute_target->NativePreprocess_Info.PreProcessEffect_AECOn == true)
                 || (stream_attribute_target->input_source == AUDIO_SOURCE_VOICE_COMMUNICATION)
                 || (stream_attribute_target->input_source == AUDIO_SOURCE_CUSTOMIZATION1) //MagiASR enable AEC
                 || (stream_attribute_target->input_source == AUDIO_SOURCE_CUSTOMIZATION2)) //Normal REC with AEC
        {
            stream_attribute_target->BesRecord_Info.besrecord_enable = EnableBesRecord();
            // AEC/VoIP capture cannot share the input with FM: force any FM
            // tuner streams into standby first.
            if (mStreamInVector.size() > 1)
            {
                for (size_t i = 0; i < mStreamInVector.size(); i++)
                {
                    if (mStreamInVector[i]->getStreamAttribute()->input_source == AUDIO_SOURCE_FM_TUNER)
                    {
                        mStreamInVector[i]->standby();
                    }
                }
            }
            if (isModeInVoipCall() == true 
                || (stream_attribute_target->input_source == AUDIO_SOURCE_VOICE_COMMUNICATION))
            {
                stream_attribute_target->BesRecord_Info.besrecord_voip_enable = true;
            }

            switch (stream_attribute_target->input_device)
            {
                case AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET:
                {
                    pCaptureHandler = new AudioALSACaptureHandlerBT(stream_attribute_target);
                    break;
                }
                default:
                {
                    pCaptureHandler = new AudioALSACaptureHandlerAEC(stream_attribute_target);
                    break;
                }
            }
        } else
        {
            //enable BesRecord if not these input sources
            if ((stream_attribute_target->input_source != AUDIO_SOURCE_VOICE_UNLOCK) &&
                (stream_attribute_target->input_source != AUDIO_SOURCE_FM_TUNER) && 
                (stream_attribute_target->input_source != AUDIO_SOURCE_MATV) &&
                (stream_attribute_target->input_source != AUDIO_SOURCE_ANC) &&
                (stream_attribute_target->input_source != AUDIO_SOURCE_UNPROCESSED))
            {
                //no uplink preprocess for sample rate higher than 48k
                if ((stream_attribute_target->sample_rate > 48000) 
                     || (stream_attribute_target->audio_format != AUDIO_FORMAT_PCM_16_BIT))   
                    stream_attribute_target->BesRecord_Info.besrecord_enable = false;
                else
                    stream_attribute_target->BesRecord_Info.besrecord_enable = EnableBesRecord();  // normally returns true
            }

            switch (stream_attribute_target->input_device)
            {
                case AUDIO_DEVICE_IN_BUILTIN_MIC:  // ordinary recording devices
                case AUDIO_DEVICE_IN_BACK_MIC:
                case AUDIO_DEVICE_IN_WIRED_HEADSET:
                {
                    pCaptureHandler = new AudioALSACaptureHandlerNormal(stream_attribute_target);  // normal recording
                    break;
                }
                case AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET:
                {
                    pCaptureHandler = new AudioALSACaptureHandlerBT(stream_attribute_target);
                    break;
                }
                default:
                {
                    pCaptureHandler = new AudioALSACaptureHandlerNormal(stream_attribute_target);
                    break;
                }
            }
        }
    }

    // save capture handler object in vector
    pCaptureHandler->setIdentity(mCaptureHandlerIndex);
    mCaptureHandlerVector.add(mCaptureHandlerIndex, pCaptureHandler);
    mCaptureHandlerIndex++;
    return pCaptureHandler;
}

// Capture handler for the "normal" path (built-in / back / headset mic).
// Common setup lives in AudioALSACaptureHandlerBase; init() tags the type.
AudioALSACaptureHandlerNormal::AudioALSACaptureHandlerNormal(stream_attribute_t *stream_attribute_target) :
    AudioALSACaptureHandlerBase(stream_attribute_target)
{
    init();
}
// Tag this handler as the normal-recording type. Always succeeds.
status_t AudioALSACaptureHandlerNormal::init()
{
    mCaptureHandlerType = CAPTURE_HANDLER_NORMAL;
    return NO_ERROR;
}
/**
 * Open the normal capture path: create a data client bound to the normal
 * capture data provider, then power on the configured input device.
 *
 * @return NO_ERROR always (failures are not propagated here).
 */
status_t AudioALSACaptureHandlerNormal::open()
{
    mCaptureDataClient = new AudioALSACaptureDataClient(AudioALSACaptureDataProviderNormal::getInstance(), 
                                                        mStreamAttributeTarget);
    mHardwareResourceManager->startInputDevice(mStreamAttributeTarget->input_device);

    //============Voice UI&Unlock REFERECE=============
    AudioVUnlockDL *VUnlockhdl = AudioVUnlockDL::getInstance();
    if (VUnlockhdl != NULL)
    {
        // Reset the voice-unlock uplink start time to a zeroed timestamp.
        struct timespec systemtime;
        memset(&systemtime, 0, sizeof(timespec));
        VUnlockhdl->SetUplinkStartTime(systemtime, 1);
    }
    //===========================================
    return NO_ERROR;
}
/**
 * Capture data client: consumes PCM from a capture data provider, owns the
 * raw / resampled / processed ring buffers, and drives the BesRecord speech
 * enhancement layer (SPELayer) plus optional sample-rate conversion.
 *
 * @param pCaptureDataProvider   provider this client attaches to.
 * @param stream_attribute_target target (application-side) stream attributes.
 */
AudioALSACaptureDataClient::AudioALSACaptureDataClient(
    AudioALSACaptureDataProviderBase *pCaptureDataProvider, 
    stream_attribute_t *stream_attribute_target) :
    mCaptureDataProvider(pCaptureDataProvider),
    mIdentity(0xFFFFFFFF),
    mStreamAttributeSource(mCaptureDataProvider->getStreamAttributeSource()),
    mStreamAttributeTarget(stream_attribute_target),
    mAudioALSAVolumeController(AudioVolumeFactory::CreateAudioVolumeController()),
    mAudioSpeechEnhanceInfoInstance(AudioSpeechEnhanceInfo::getInstance()),
    mChannelRemixOp(CHANNEL_REMIX_NOP)  // fixed: removed stray trailing comma
{
    // init member struct
    memset((void *)&mEchoRefRawDataBuf, 0, sizeof(mEchoRefRawDataBuf));
    memset((void *)&mEchoRefSrcDataBuf, 0, sizeof(mEchoRefSrcDataBuf));

    // raw data: PCM exactly as delivered by the input device
    memset((void *)&mRawDataBuf, 0, sizeof(mRawDataBuf));
    mRawDataBuf.pBufBase = new char[kClientBufferSize];
    mRawDataBuf.bufLen   = kClientBufferSize;
    mRawDataBuf.pRead    = mRawDataBuf.pBufBase;
    mRawDataBuf.pWrite   = mRawDataBuf.pBufBase;

    // src data: PCM after sample-rate conversion
    memset((void *)&mSrcDataBuf, 0, sizeof(mSrcDataBuf));
    mSrcDataBuf.pBufBase = new char[kClientBufferSize];
    mSrcDataBuf.bufLen   = kClientBufferSize;
    mSrcDataBuf.pRead    = mSrcDataBuf.pBufBase;
    mSrcDataBuf.pWrite   = mSrcDataBuf.pBufBase;

    // processed data: PCM after BesRecord / effect processing
    memset((void *)&mProcessedDataBuf, 0, sizeof(mProcessedDataBuf));
    mProcessedDataBuf.pBufBase = new char[kClientBufferSize];
    mProcessedDataBuf.bufLen   = kClientBufferSize;
    mProcessedDataBuf.pRead    = mProcessedDataBuf.pBufBase;
    mProcessedDataBuf.pWrite   = mProcessedDataBuf.pBufBase;

    mBesRecordStereoMode = false;
    mBypassBesRecord = false;
    mNeedBesRecordSRC = false;
    // fixed: mBliSrc was never initialized; when source and target rates
    // match, the conditional newMtkAudioSrc() below is skipped and the
    // member would be read uninitialized later.
    mBliSrc = NULL;
    mBliSrcHandler1 = NULL;
    mBliSrcHandler2 = NULL;
    mBesRecSRCSizeFactor = 1;
    mBesRecSRCSizeFactor2 = 1;
    dropBesRecordDataSize = 0;
    mFirstSRC = true;
    mFirstEchoSRC = true;
    mDropMs = 0;

    mSpeechProcessMode = SPE_MODE_REC;
    mVoIPSpeechEnhancementMask = mStreamAttributeTarget->BesRecord_Info.besrecord_dynamic_mask;

    //BesRecord Config
    mSPELayer = new SPELayer();
    SetCaptureGain();

    if (mStreamAttributeTarget->BesRecord_Info.besrecord_enable)
    {
        LoadBesRecordParams();
        mSPELayer->SetVMDumpEnable(mStreamAttributeTarget->BesRecord_Info.besrecord_tuningEnable ||
                                        mStreamAttributeTarget->BesRecord_Info.besrecord_dmnr_tuningEnable);
        mSPELayer->SetVMDumpFileName(mStreamAttributeTarget->BesRecord_Info.besrecord_VMFileName);
        mSPELayer->SetPlatfromTimeOffset(ECHOREF_TIME_OFFSET); //Default -4ms EchoRef data

        ConfigBesRecordParams(); // configure the BesRecord (SPELayer) parameters
        StartBesRecord();
        if((stream_attribute_target->BesRecord_Info.besrecord_voip_enable == true) ||
            (stream_attribute_target->BesRecord_Info.besrecord_ForceMagiASREnable == true) ||
            (stream_attribute_target->BesRecord_Info.besrecord_ForceAECRecEnable == true))
        {
            // fixed: the format string was illegally split across two source
            // lines and had five %d conversions but only four arguments (the
            // "drop ms" value was missing) -- undefined behavior in printf.
            ALOGD("sample rate = %d, drop ms = %d, channels = %d, bytes per sample = %d, dropBesRecordDataSize = %d\n",
                  stream_attribute_target->sample_rate,
                  mDropMs,
                  stream_attribute_target->num_channels,
                  audio_bytes_per_sample(stream_attribute_target->audio_format),
                  dropBesRecordDataSize);
        }  // fixed: this closing brace was missing, unbalancing the function
    } else {
        CheckBesRecordStereoModeEnable();
    }

    //Android Native Preprocess effect +++
    mAudioPreProcessEffect = NULL;
    mAudioPreProcessEffect = new AudioPreProcess(mStreamAttributeTarget);
    CheckNativeEffect();
    //Android Native Preprocess effect ---

    // attach client to capture data provider
    mCaptureDataProvider->attach(this); // provider fills mStreamAttributeSource

    //assume starts after PCM open
    mSPELayer->SetUPLinkDropTime(CAPTURE_DROP_MS);
    mSPELayer->SetUPLinkIntrStartTime(GetSystemTime(false));

    // Decide whether resampling is needed from the provider (source) rate to
    // the application (target) rate.
    if (mStreamAttributeSource->sample_rate != mStreamAttributeTarget->sample_rate)
    {
        SRC_PCM_FORMAT  SrcFormat = mStreamAttributeTarget->audio_format == 
                                    AUDIO_FORMAT_PCM_16_BIT ? SRC_IN_Q1P15_OUT_Q1P15 :SRC_IN_Q1P31_OUT_Q1P31;
        mBliSrc = newMtkAudioSrc(
                mStreamAttributeSource->sample_rate, mStreamAttributeSource->num_channels,
                mStreamAttributeTarget->sample_rate, mStreamAttributeSource->num_channels,
                SrcFormat);
        mBliSrc->open();
    }
    if (mStreamAttributeTarget->BesRecord_Info.besrecord_enable)
    {
        //move CheckNeedBesRecordSRC to here for mStreamAttributeSource info
        CheckNeedBesRecordSRC();  // plain sound/video recording needs no SRC
    }

    CheckChannelRemixOp();  // decide whether channel remixing is required
}

配置BesRecord对象的参数

/**
 * Configure the BesRecord (SPELayer) parameters for the current use case.
 *
 * Selects the speech-process mode (VoIP / AEC-record / plain record), builds
 * the audio-parameter category path from input source and route, then loads
 * enhancement, FIR-compensation and DMNR parameters from the audio_param XML
 * files and pushes them into mSPELayer, along with sample rate, APP table,
 * route and uplink gains.
 *
 * NOTE(review): pParamUnit / pSpeciParam / pCommonParam / pInFirParam /
 * pOutFirParam / pInFir1Param / pInFir2Param / pDmnrParam are not declared in
 * this function -- presumably members or file-scope variables; confirm.
 */
void AudioALSACaptureDataClient::ConfigBesRecordParams(void)
{
    AppOps* appOps = appOpsGetInstance();
    // XML parameter-file parser handle used for all parameter reads below.
    AppHandle* pAppHandle = appOps->appHandleGetInstance();
    AudioType* VoIPAudioType = appOps->appHandleGetAudioTypeByName(pAppHandle, VOIP_AUDIO_TYPE);
    AudioType* VoIPDmnrAudioType = appOps->appHandleGetAudioTypeByName(pAppHandle, VOIPDMNR_AUDIO_TYPE);
    AudioType* VoIPGeneralAudioType = appOps->appHandleGetAudioTypeByName(pAppHandle, VOIPGENERAL_AUDIO_TYPE);
    AudioType* RecordAudioType = appOps->appHandleGetAudioTypeByName(pAppHandle, RECORD_AUDIO_TYPE);
    AudioType* RecordFirAudioType = appOps->appHandleGetAudioTypeByName(pAppHandle, RECORDFIR_AUDIO_TYPE);
    AudioType* RecordDmnrAudioType = appOps->appHandleGetAudioTypeByName(pAppHandle, RECORDDMNR_AUDIO_TYPE);

    std::string categoryPath = "";
    uWord32 BesRecordEnhanceParas[EnhanceParasNum] = {0};
    Word16 BesRecordCompenFilter[CompenFilterNum] = {0};
    Word16 BesRecordDMNRParam[DMNRCalDataNum] = {0};
    bool bVoIPEnable = IsVoIPEnable();
    int RoutePath = GetBesRecordRoutePath();  // e.g. ROUTE_SPEAKER for handset recording
    SPE_MODE mode = mSpeechProcessMode;
    mBesRecordStereoMode = false;

    // Get mSpeechProcessMode
    if (bVoIPEnable)  // VoIP call
    {
        mode = SPE_MODE_VOIP;
        mSpeechProcessMode = mode;
    } else if ((mStreamAttributeTarget->input_source == AUDIO_SOURCE_CUSTOMIZATION1)//MagiASR need AEC
             || (mStreamAttributeTarget->input_source == AUDIO_SOURCE_CUSTOMIZATION2))//Normal Record+AEC
    {
        mode = SPE_MODE_AECREC;
        mSpeechProcessMode = mode;
    } else {  // plain recording
        mode = SPE_MODE_REC;
        mSpeechProcessMode = mode;
    }

    if (bVoIPEnable)  // VoIP call: build the VoIP category path
    {
        // Get VoIP category path
        if (RoutePath == ROUTE_BT)
        {
            categoryPath = VOIP_BT_PATH;
        } else if (RoutePath == ROUTE_EARPHONE)
        {
            categoryPath = VOIP_3POLE_HEADSET_PATH;
        } else if (RoutePath == ROUTE_HEADSET)
        {
            switch (AudioALSAHardwareResourceManager::getInstance()->getNumOfHeadsetPole())
            {
            case 4:
                categoryPath = VOIP_4POLE_HEADSET_PATH;
                break;
            case 5:
                // fixed: these two assignments previously used
                // `categoryPath += "," ...` while categoryPath was still
                // empty, producing a path with a spurious leading comma
                // (the 4-pole case above assigns directly).
                if (AudioALSAHardware::GetInstance()->getParameters(keyANC_runing) == "ANC_running=true") {
                    categoryPath = VOIP_5POLE_HEADSET_ANC_PATH;
                } else {
                    categoryPath = VOIP_5POLE_HEADSET_PATH;
                }
                break;
            }
        } else if (RoutePath == ROUTE_SPEAKER)
        {
            if ( appOps->appHandleIsFeatureOptionEnabled(pAppHandle, VOIP_HANDSFREE_DMNR_SUPPORT_FO) == 1) {
                categoryPath = VOIP_HANDSFREE_NR_PATH;
            } else {
                categoryPath = VOIP_HANDSFREE_NO_NR_PATH;
            }
        } else
        {
            if ( appOps->appHandleIsFeatureOptionEnabled(pAppHandle, VOIP_NORMAL_DMNR_SUPPORT_FO) == 1) {
                categoryPath = VOIP_HANDSET_DMNR_PATH;
            } else {
                categoryPath = VOIP_HANDSET_NO_DMNR_PATH;
            }
        }
    } else {  // plain recording: build the record category path
        // Get Record category path
        if ((mStreamAttributeTarget->input_source == AUDIO_SOURCE_VOICE_RECOGNITION) ||
                                 mStreamAttributeTarget->BesRecord_Info.besrecord_tuning16K)
        {
            categoryPath += RECORD_VR_PATH;
        } else if (mStreamAttributeTarget->input_source == AUDIO_SOURCE_VOICE_UNLOCK)
        {
            categoryPath += RECORD_VOICE_UNLOCK_PATH;
            CheckBesRecordStereoModeEnable();
        } else if (mStreamAttributeTarget->input_source == AUDIO_SOURCE_CUSTOMIZATION1)
        {
            categoryPath += RECORD_ASR_PATH;
        } else if (mStreamAttributeTarget->input_source == AUDIO_SOURCE_CUSTOMIZATION2)
        {
            categoryPath += RECORD_CUSTOMIZATION2_PATH;
        } else if (mStreamAttributeTarget->input_source == AUDIO_SOURCE_UNPROCESSED)
        {
            categoryPath += RECORD_UNPROCESSED_PATH;
        } else {
            // Sound/Video recording, Get application from besrecord_scene
            switch(mStreamAttributeTarget->BesRecord_Info.besrecord_scene)
            {
            case 1:
                categoryPath += RECORD_SND_REC_NORMAL_PATH;
                CheckBesRecordStereoModeEnable();
                break;
            case 2:
                categoryPath += RECORD_SND_REC_MEETING_PATH;
                CheckBesRecordStereoModeEnable();
                break;
            case 3:
                categoryPath += RECORD_SND_REC_LECTURE_PATH;
                CheckBesRecordStereoModeEnable();
                break;
            case 4:
                categoryPath += RECORD_CAM_REC_NORMAL_PATH;
                CheckBesRecordStereoModeEnable();
                break;
            case 5:
                categoryPath += RECORD_CAM_REC_MEETING_PATH;
                break;
            default:
                if (mStreamAttributeTarget->input_source == AUDIO_SOURCE_CAMCORDER)
                {
                    categoryPath += RECORD_CAM_REC_NORMAL_PATH;  // camcorder recording
                    CheckBesRecordStereoModeEnable();
                } else {
                    categoryPath += RECORD_SND_REC_NORMAL_PATH;  // sound-recorder recording
                    CheckBesRecordStereoModeEnable();
                }
                break;
            }
        }

        if (RoutePath == ROUTE_BT)  // recording over Bluetooth
        {
            categoryPath += "," RECORD_BT_PATH;
        } else if (RoutePath == ROUTE_HEADSET)  // recording via wired headset
        {
            switch (AudioALSAHardwareResourceManager::getInstance()->getNumOfHeadsetPole())
            {
            case 4:
                categoryPath += "," RECORD_4POLE_HEADSET_PATH;
                break;
            case 5:
                if (AudioALSAHardware::GetInstance()->getParameters(keyANC_runing) == "ANC_running=true") {
                    categoryPath += "," RECORD_5POLE_HEADSET_ANC_PATH;
                } else {
                    categoryPath += "," RECORD_5POLE_HEADSET_PATH;
                }
                break;
            }
        } else if (RoutePath == ROUTE_SPEAKER)
        {
            categoryPath += "," RECORD_HANDSET_PATH;  // main/back mic recording
        } else {
            categoryPath += "," RECORD_HANDSET_PATH;
        }
    }

    // set speech parameters+++
    if (mode == SPE_MODE_VOIP || mStreamAttributeTarget->BesRecord_Info.besrecord_dmnr_tuningEnable == true)
    {
        pParamUnit = appOps->audioTypeGetParamUnit(VoIPAudioType, categoryPath.c_str());
        pSpeciParam = appOps->paramUnitGetParamByName(pParamUnit, VOIP_PARAM);
    } else {  // plain recording case
         // record case
        pParamUnit = appOps->audioTypeGetParamUnit(RecordAudioType, categoryPath.c_str());
        pSpeciParam = appOps->paramUnitGetParamByName(pParamUnit, RECORD_PARAM);
    }
    //common parameters as same as VoIP's
    pParamUnit = appOps->audioTypeGetParamUnit(VoIPGeneralAudioType, VOIP_COMMON_PATH);
    pCommonParam = appOps->paramUnitGetParamByName(pParamUnit, VOIPGENERAL_PARAM_NAME);

    //pSpeciParam + pCommonParam
    memcpy(BesRecordEnhanceParas, (uWord32*)pSpeciParam->data, pSpeciParam->arraySize * sizeof(uWord32));
    memcpy(&BesRecordEnhanceParas[pSpeciParam->arraySize], (uWord32*)pCommonParam->data, 
           pCommonParam->arraySize * sizeof(uWord32));
    mSPELayer->SetEnhPara(mode, BesRecordEnhanceParas);
    //speech parameters---

    //FIR parameters+++
    if (mStreamAttributeTarget->BesRecord_Info.besrecord_dmnr_tuningEnable == true)
    {
        pParamUnit = appOps->audioTypeGetParamUnit(VoIPAudioType, categoryPath.c_str());
        pInFirParam = appOps->paramUnitGetParamByName(pParamUnit, VOIP_IN_FIR_PARAM);
        memcpy(BesRecordCompenFilter, (Word16*)pInFirParam->data, 
               pInFirParam->arraySize * sizeof(Word16));   // UL1
        memcpy(&BesRecordCompenFilter[pInFirParam->arraySize], (Word16*)pInFirParam->data, 
               pInFirParam->arraySize * sizeof(Word16));
    } else if (mode == SPE_MODE_VOIP)
    {
        pParamUnit = appOps->audioTypeGetParamUnit(VoIPAudioType, categoryPath.c_str());
        pInFirParam = appOps->paramUnitGetParamByName(pParamUnit, VOIP_IN_FIR_PARAM);
        pOutFirParam = appOps->paramUnitGetParamByName(pParamUnit, VOIP_OUT_FIR_PARAM);

        // VoIP FIR parameter have 3 FIR parameter, but 1 FIR parameter work for SWIP limitation.
        memcpy(BesRecordCompenFilter, (Word16*)pInFirParam->data, 
               pInFirParam->arraySize * sizeof(Word16));   // UL1
        memcpy(&BesRecordCompenFilter[pInFirParam->arraySize], (Word16*)pInFirParam->data, 
               pInFirParam->arraySize * sizeof(Word16));
        memcpy(&BesRecordCompenFilter[pInFirParam->arraySize*2], (Word16*)pOutFirParam->data, 
                pOutFirParam->arraySize * sizeof(Word16));   // DL
    } else {  // plain recording: load the two uplink FIR compensation filters
        // Record 2 FIR param is work
        pParamUnit = appOps->audioTypeGetParamUnit(RecordFirAudioType, categoryPath.c_str());
        pInFir1Param = appOps->paramUnitGetParamByName(pParamUnit, RECORD_IN_FIR1_PARAM);
        pInFir2Param = appOps->paramUnitGetParamByName(pParamUnit, RECORD_IN_FIR2_PARAM);

        memcpy(BesRecordCompenFilter, (Word16*)pInFir1Param->data, 
               pInFir1Param->arraySize * sizeof(Word16));   // UL1
        memcpy(&BesRecordCompenFilter[pInFir1Param->arraySize], (Word16*)pInFir2Param->data, 
               pInFir2Param->arraySize * sizeof(Word16));   // UL2
    }
    mSPELayer->SetCompFilter(mode, BesRecordCompenFilter);
    //FIR parameters---

    //DMNR parameters+++
    if (((QueryFeatureSupportInfo()& SUPPORT_DUAL_MIC) > 0) 
       && (mStreamAttributeTarget->BesRecord_Info.besrecord_bypass_dualmicprocess == false))
    {
        //DMNR parameters
        //google default input source AUDIO_SOURCE_VOICE_RECOGNITION not using DMNR (on/off by parameters)
        if (((mStreamAttributeTarget->input_source == AUDIO_SOURCE_VOICE_RECOGNITION) 
            || (mStreamAttributeTarget->input_source == AUDIO_SOURCE_CUSTOMIZATION1) 
            || mStreamAttributeTarget->BesRecord_Info.besrecord_tuning16K 
            || mStreamAttributeTarget->BesRecord_Info.besrecord_dmnr_tuningEnable) 
            && ((QueryFeatureSupportInfo()& SUPPORT_ASR) > 0))
        {
            pParamUnit = appOps->audioTypeGetParamUnit(RecordDmnrAudioType, categoryPath.c_str());
            pDmnrParam = appOps->paramUnitGetParamByName(pParamUnit, RECORD_DMNR_PARAM);
            memcpy(BesRecordDMNRParam, (Word16*)pDmnrParam->data, pDmnrParam->arraySize * sizeof(Word16));
        } else if (mode == SPE_MODE_VOIP)
        {
            //receiver path
            if ((RoutePath == ROUTE_NORMAL) && ((QueryFeatureSupportInfo()& SUPPORT_VOIP_NORMAL_DMNR) > 0)
                && CheckDynamicSpeechEnhancementMaskOnOff(VOIP_SPH_ENH_DYNAMIC_MASK_DMNR))
            {
                pParamUnit = appOps->audioTypeGetParamUnit(VoIPDmnrAudioType, categoryPath.c_str());
                pDmnrParam = appOps->paramUnitGetParamByName(pParamUnit, VOIP_DMNR_PARAM);

                memcpy(BesRecordDMNRParam, (Word16*)pDmnrParam->data, pDmnrParam->arraySize * sizeof(Word16));
                SetDMNREnable(DMNR_NORMAL, true);
            }
            //speaker path
            else if ((RoutePath == ROUTE_SPEAKER) && ((QueryFeatureSupportInfo()& SUPPORT_VOIP_HANDSFREE_DMNR) > 0)
                     && CheckDynamicSpeechEnhancementMaskOnOff(VOIP_SPH_ENH_DYNAMIC_MASK_LSPK_DMNR))
            {
                pParamUnit = appOps->audioTypeGetParamUnit(VoIPDmnrAudioType, categoryPath.c_str());
                pDmnrParam = appOps->paramUnitGetParamByName(pParamUnit, VOIP_DMNR_PARAM);

                memcpy(BesRecordDMNRParam, (Word16*)pDmnrParam->data, pDmnrParam->arraySize * sizeof(Word16));
                SetDMNREnable(DMNR_HANDSFREE, true);
            } else
            {
                pParamUnit = appOps->audioTypeGetParamUnit(VoIPDmnrAudioType, VOIP_NO_DMNR_PATH);
                pDmnrParam = appOps->paramUnitGetParamByName(pParamUnit, VOIP_DMNR_PARAM);

                memcpy(BesRecordDMNRParam, (Word16*)pDmnrParam->data, pDmnrParam->arraySize * sizeof(Word16));
                SetDMNREnable(DMNR_DISABLE, false);
            }
        } else  // plain recording: no dual-mic noise reduction
        {
            pParamUnit = appOps->audioTypeGetParamUnit(RecordDmnrAudioType, RECORD_NO_DMNR_PATH);
            pDmnrParam = appOps->paramUnitGetParamByName(pParamUnit, RECORD_DMNR_PARAM);

            memcpy(BesRecordDMNRParam, (Word16*)pDmnrParam->data, pDmnrParam->arraySize * sizeof(Word16));
        }
        mSPELayer->SetDMNRPara(mode, BesRecordDMNRParam);
    } else
    {
        // no DMNR support DMNR disabled
        pParamUnit = appOps->audioTypeGetParamUnit(RecordDmnrAudioType, RECORD_NO_DMNR_PATH);
        pDmnrParam = appOps->paramUnitGetParamByName(pParamUnit, RECORD_DMNR_PARAM);

        memcpy(BesRecordDMNRParam, (Word16*)pDmnrParam->data, pDmnrParam->arraySize * sizeof(Word16));
        mSPELayer->SetDMNRPara(mode, BesRecordDMNRParam);
        SetDMNREnable(DMNR_DISABLE, false);
    }
    //DMNR parameters---

    //need to config as 16k sample rate for voice recognition
    //(Google's will use 48K preprocess instead) or VoIP or REC+AEC
    if ((mStreamAttributeTarget->input_source == AUDIO_SOURCE_CUSTOMIZATION1)
        || (mStreamAttributeTarget->input_source == AUDIO_SOURCE_CUSTOMIZATION2)
        || (mStreamAttributeTarget->BesRecord_Info.besrecord_tuning16K == true) 
        || (IsVoIPEnable() == true))
    {
        if (mode == SPE_MODE_VOIP) //VoIP case
        {
            mSPELayer->SetSampleRate(mode, VOICE_RECOGNITION_RECORD_SAMPLE_RATE);
            mSPELayer->SetAPPTable(mode, WB_VOIP);
        } else    //voice recognition case
        {
            mSPELayer->SetSampleRate(mode, VOICE_RECOGNITION_RECORD_SAMPLE_RATE);
            if (mStreamAttributeTarget->input_source == AUDIO_SOURCE_CUSTOMIZATION2)
            {
                mSPELayer->SetAPPTable(mode, MONO_AEC_RECORD);   //set library do AEC Record
            } else//set library do voice recognition process or MagiASR
            {
                mSPELayer->SetAPPTable(mode, SPEECH_RECOGNITION);   
            }
        }
    } else    //normal record  use 48k
    {
        mSPELayer->SetSampleRate(mode, HD_RECORD_SAMPLE_RATE);  // 48 kHz processing
        if (mBesRecordStereoMode)  // stereo recording process
        {
            mSPELayer->SetAPPTable(mode, STEREO_RECORD);
        } else  // mono recording process
        {
            mSPELayer->SetAPPTable(mode, MONO_RECORD);
        }
    }

    mSPELayer->SetRoute((SPE_ROUTE)RoutePath);  // e.g. ROUTE_SPEAKER

    // Apply uplink (microphone) gains; BT SCO carries its own gain, so use 0.
    long gain = mAudioALSAVolumeController->GetSWMICGain();
    uint8_t TotalGain = mAudioALSAVolumeController->GetULTotalGain();
    if (mStreamAttributeTarget->input_device == AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET)
    {
        gain = 0;
        TotalGain = 0;
        ALOGD("BT path set Digital MIC gain = 0");
    }
    mSPELayer->SetMICDigitalGain(mode, gain);
    mSPELayer->SetUpLinkTotalGain(mode, TotalGain);
    ALOGD("-%s()", __FUNCTION__);
}

判断输入设备到BesRecord是否需要重采样

bool AudioALSACaptureDataClient::CheckNeedBesRecordSRC()
{
    uint32_t BesRecord_usingsamplerate = HD_RECORD_SAMPLE_RATE;

    if (mStreamAttributeTarget->BesRecord_Info.besrecord_enable == true)
    {
        //BesRecord need 16K sample rate data 
        //(Google's voice recognition will use 48K process due to new CTS test case)
        if ((mStreamAttributeTarget->input_source == AUDIO_SOURCE_CUSTOMIZATION1)
            || (mStreamAttributeTarget->input_source == AUDIO_SOURCE_CUSTOMIZATION2)
            || (mStreamAttributeTarget->BesRecord_Info.besrecord_tuning16K == true) 
            || (IsVoIPEnable() == true))
        {
            BesRecord_usingsamplerate = VOICE_RECOGNITION_RECORD_SAMPLE_RATE;
            //need src if the stream source sample rate are not the same with BesRecord needed
            if ((mStreamAttributeSource->sample_rate  != VOICE_RECOGNITION_RECORD_SAMPLE_RATE) 
                 || (mStreamAttributeSource->num_channels != 2))
            {
                mNeedBesRecordSRC = true;
            } else
            {
                mNeedBesRecordSRC = false;
            }
        } else    //BesRecord need 48K sample rate data
        {
            //need src if the stream source sample rate are not the same with BesRecord needed
            if ((mStreamAttributeSource->sample_rate  != HD_RECORD_SAMPLE_RATE)  
            || (mStreamAttributeSource->num_channels != 2))
            {
                mNeedBesRecordSRC = true;
                BesRecord_usingsamplerate = HD_RECORD_SAMPLE_RATE;
            } else //录像机和录音机,不需要重采样
            {
                mNeedBesRecordSRC = false;
            }
        }

        //if need to do BesRecord SRC
        {
            // Need SRC from stream target to BesRecord needed
            if ((mStreamAttributeSource->sample_rate != BesRecord_usingsamplerate) 
                || (mStreamAttributeSource->num_channels != 2))
            {

                mBliSrcHandler1 = newMtkAudioSrc(mStreamAttributeSource->sample_rate, 
                                                 mStreamAttributeSource->num_channels,
                                                 BesRecord_usingsamplerate, 2, 
                                                 SRC_IN_Q1P15_OUT_Q1P15);
                mBliSrcHandler1->open();
            }

            mBesRecSRCSizeFactor = ((BesRecord_usingsamplerate * 2) / (mStreamAttributeSource->sample_rate * 
                                    mStreamAttributeSource->num_channels)) + 1;

            // Need SRC from BesRecord to stream target needed
            if (mStreamAttributeTarget->sample_rate != BesRecord_usingsamplerate)
            {
                mBliSrcHandler2 = newMtkAudioSrc(BesRecord_usingsamplerate, 2,
                                  mStreamAttributeTarget->sample_rate, 2, SRC_IN_Q1P15_OUT_Q1P15);
                mBliSrcHandler2->open();
            }
            mBesRecSRCSizeFactor2 = ((mStreamAttributeTarget->sample_rate * 2) / (BesRecord_usingsamplerate * 2)) + 1;
        }
    } else
    {
        mNeedBesRecordSRC = false;
    }
    return mNeedBesRecordSRC;
}

关联DataProvider对象和DataClient对象,DataProvider可以为多个DataClient提供数据

// Register a capture client with this data provider. One provider can feed
// several clients; the underlying PCM input device is opened lazily when
// the very first client attaches.
void AudioALSACaptureDataProviderBase::attach(AudioALSACaptureDataClient *pCaptureDataClient)
{
    // Hand out a unique identity, then index the client by it.
    pCaptureDataClient->setIdentity(mCaptureDataClientIndex);
    mCaptureDataClientVector.add(pCaptureDataClient->getIdentity(), pCaptureDataClient);
    ++mCaptureDataClientIndex;

    const bool isFirstClient = (mCaptureDataClientVector.size() == 1);
    if (isFirstClient)
    {
        ++mOpenIndex;
        open(); // open pcm interface on first attach
    }
}

打开输入设备,创建读取线程

// Configure and open the UL1 PCM capture device, then spawn the reader
// thread that continuously pulls data from the ALSA driver.
// Returns NO_ERROR on success, UNKNOWN_ERROR if the PCM device or the
// reader thread could not be set up.
status_t AudioALSACaptureDataProviderNormal::open()
{
    ALOGD("%s()", __FUNCTION__);
    int pcmindex = AudioALSADeviceParser::getInstance()->GetPcmIndexByString(keypcmUl1Capture);
    int cardindex = AudioALSADeviceParser::getInstance()->GetCardIndexByString(keypcmUl1Capture);
    ALOGD("%s cardindex = %d  pcmindex = %d", __FUNCTION__, cardindex, pcmindex);

    // Query the largest hardware buffer size; period_size is derived from it below.
    // fix: guard against a NULL params handle before dereferencing it.
    unsigned int buffersizemax = 0;
    struct pcm_params *params = pcm_params_get(cardindex, pcmindex, PCM_IN);
    if (params != NULL)
    {
        buffersizemax = pcm_params_get_max(params, PCM_PARAM_BUFFER_BYTES);
        pcm_params_free(params);
    } else
    {
        ALOGE("%s(), pcm_params_get fail", __FUNCTION__);
    }
    ALOGD("buffersizemax = %d", buffersizemax);

    bool bHifiRecord = AudioSpeechEnhanceInfo::getInstance()->GetHifiRecord();
    ALOGD("bHifiRecord = %d", bHifiRecord);
    //debug++
    btempDebug = AudioSpeechEnhanceInfo::getInstance()->GetDebugStatus();
    ALOGD("btempDebug = %d", btempDebug);
    //debug--

    mConfig.channels = 2;
    mConfig.period_count = 4;
    mConfig.rate = 48000; // default capture rate; bumped to 96 kHz for HiFi record
    uint32_t latency = getLatencyTime();

    if (bHifiRecord == true)
    {
        mConfig.rate = 96000;
    }

    if (latency == UPLINK_LOW_LATENCY_MS)
    {
        mConfig.period_count = 8; // 2*(20ms/5ms);
    }

    mConfig.format = PCM_FORMAT_S16_LE;
    mStreamAttributeSource.audio_format = AUDIO_FORMAT_PCM_16_BIT;
    // (UL)48K\5ms data\stereo\4byte\(Align64byte)
    kReadBufferSize = (((uint32_t)(mConfig.rate / 1000 * latency * mConfig.channels *
                      (pcm_format_to_bits(mConfig.format) / 8))) & 0xFFFFFFC0);
    // fix: the original expression was missing a closing parenthesis (compile error)
    mConfig.period_size = (buffersizemax / mConfig.channels /
                          (pcm_format_to_bits(mConfig.format) / 8)) / mConfig.period_count;

    mConfig.start_threshold = 0;
    mConfig.stop_threshold = 0;
    mConfig.silence_threshold = 0;

    mCaptureDataProviderType = CAPTURE_PROVIDER_NORMAL;
    mCaptureDropSize = 0;

    // config attribute (will used in client SRC/Enh/... later) // TODO(Harvey): query this
    mStreamAttributeSource.audio_channel_mask = AUDIO_CHANNEL_IN_STEREO;
    mStreamAttributeSource.num_channels =
        android_audio_legacy::AudioSystem::popCount(mStreamAttributeSource.audio_channel_mask);
    mStreamAttributeSource.sample_rate = mConfig.rate;  //48000;

    // Reset frames readed counter
    mStreamAttributeSource.Time_Info.total_frames_readed = 0;

    ALOGD("%s(), mCaptureDropSize=%d, CAPTURE_DROP_MS=%d", __FUNCTION__, mCaptureDropSize, CAPTURE_DROP_MS);
    ALOGD("%s(), period_count=%d, period_size=%d, samplerate = %d", __FUNCTION__, mConfig.period_count,
                 mConfig.period_size, mConfig.rate);

    OpenPCMDump(LOG_TAG);

    // enable pcm (card/pcm index already resolved above; the original looked them up twice)
    mPcm = pcm_open(cardindex, pcmindex, PCM_IN | PCM_MONOTONIC, &mConfig);
    ALOGV("%s(), mPcm = %p", __FUNCTION__, mPcm);
    if (mPcm == NULL || pcm_is_ready(mPcm) == 0)
    {
        // fix: the original called pcm_start() unconditionally, crashing on a NULL handle
        ALOGE("%s(), pcm_open fail: %s", __FUNCTION__, mPcm ? pcm_get_error(mPcm) : "NULL handle");
        if (mPcm != NULL)
        {
            pcm_close(mPcm);
            mPcm = NULL;
        }
        return UNKNOWN_ERROR;
    }
    pcm_start(mPcm);

    // create reading thread
    mEnable = true;
    int ret = pthread_create(&hReadThread, NULL, AudioALSACaptureDataProviderNormal::readThread, (void *)this);
    if (ret != 0)
    {
        // fix: the original ignored pthread_create failure
        ALOGE("%s() pthread_create fail, ret = %d", __FUNCTION__, ret);
        mEnable = false;
        return UNKNOWN_ERROR;
    }
    return NO_ERROR;
}

检查是否需要进行重新混音

// Decide which channel-remix operation this client needs, based on the
// channel counts of the source (hardware) and target (client) streams and
// on whether BesRecord speech enhancement is enabled (SUPPORT_HD_RECORD).
void AudioALSACaptureDataClient::CheckChannelRemixOp(void)
{
    const uint32_t targetChannel = mStreamAttributeTarget->num_channels;
    const uint32_t sourceChannel = mStreamAttributeSource->num_channels;

    if (mStreamAttributeTarget->BesRecord_Info.besrecord_enable) {
        // BesRecord path.
        if (targetChannel == 1) {
            // Sound recorder: stereo in, mono out — keep the left channel only.
            mChannelRemixOp = CHANNEL_STEREO_DOWNMIX_L_ONLY;
        } else if (targetChannel == 2 && !mBesRecordStereoMode) {
            // speech enhancement output data is mono, need to convert to stereo
            mChannelRemixOp = CHANNEL_STEREO_CROSSMIX_L2R;
        } else {
            // Camcorder: layout already matches, nothing to remix.
            mChannelRemixOp = CHANNEL_REMIX_NOP;
        }
        return;
    }

    // Plain record path (BesRecord disabled).
    if (targetChannel == 1 && sourceChannel == 2) {
        /* For unprocessed audio source, the down channel should refer to L ch only, don't do the channel mix */
        mChannelRemixOp = (mBesRecordStereoMode &&
                           mStreamAttributeTarget->input_source != AUDIO_SOURCE_UNPROCESSED)
                              ? CHANNEL_STEREO_DOWNMIX
                              : CHANNEL_STEREO_DOWNMIX_L_ONLY;
    } else if (targetChannel == 2 && sourceChannel == 1) {
        mChannelRemixOp = CHANNEL_MONO_TO_STEREO;
    } else if (targetChannel == 2 && sourceChannel == 2) {
        mChannelRemixOp = mBesRecordStereoMode ? CHANNEL_REMIX_NOP
                                               : CHANNEL_STEREO_CROSSMIX_L2R;
    } else {
        mChannelRemixOp = CHANNEL_REMIX_NOP;
    }
}

读取输入设备的线程,不停的读取mPcmReadBuf里的数据

void *AudioALSACaptureDataProviderNormal::readThread(void *arg)
{
    pthread_detach(pthread_self());

    status_t retval = NO_ERROR;
    AudioALSACaptureDataProviderNormal *pDataProvider = static_cast(arg);

    uint32_t open_index = pDataProvider->mOpenIndex;

    char nameset[32];
    sprintf(nameset, "%s%d", __FUNCTION__, pDataProvider->mCaptureDataProviderType);
    prctl(PR_SET_NAME, (unsigned long)nameset, 0, 0, 0);

    // read raw data from alsa driver
    char linear_buffer[kReadBufferSize];
    uint32_t Read_Size = kReadBufferSize;
    uint32_t kReadBufferSize_new;
    while (pDataProvider->mEnable == true)
    {
        retval = pDataProvider->mEnableLock.lock_timeout(500);
        if (pDataProvider->mEnable == false)
        {
            pDataProvider->mEnableLock.unlock();
            break;
        }

        clock_gettime(CLOCK_REALTIME, &pDataProvider->mNewtime);
        pDataProvider->timerec[0] = calc_time_diff(pDataProvider->mNewtime, pDataProvider->mOldtime);
        pDataProvider->mOldtime = pDataProvider->mNewtime;

        if (pDataProvider->mCaptureDropSize > 0)
        {
            Read_Size = (pDataProvider->mCaptureDropSize > kReadBufferSize) ? 
                         kReadBufferSize : pDataProvider->mCaptureDropSize;
            int retval = pcm_read(pDataProvider->mPcm, linear_buffer, Read_Size);
            pDataProvider->mCaptureDropSize -= Read_Size;
            pDataProvider->mEnableLock.unlock();
            continue;
        } else
        {
            int retval = pcm_read(pDataProvider->mPcm, linear_buffer, kReadBufferSize);
        }
        clock_gettime(CLOCK_REALTIME, &pDataProvider->mNewtime);
        pDataProvider->timerec[1] = calc_time_diff(pDataProvider->mNewtime, pDataProvider->mOldtime);
        pDataProvider->mOldtime = pDataProvider->mNewtime;

        //struct timespec tempTimeStamp;
        pDataProvider->GetCaptureTimeStamp(&pDataProvider->mStreamAttributeSource.Time_Info, kReadBufferSize);

        // use ringbuf format to save buffer info
        pDataProvider->mPcmReadBuf.pBufBase = linear_buffer;
        pDataProvider->mPcmReadBuf.bufLen   = kReadBufferSize + 1; // +1: avoid pRead == pWrite
        pDataProvider->mPcmReadBuf.pRead    = linear_buffer;
        pDataProvider->mPcmReadBuf.pWrite   = linear_buffer + kReadBufferSize;
        pDataProvider->mEnableLock.unlock();
        pDataProvider->provideCaptureDataToAllClients(open_index);//处理mPcmReadBuf里的输入数据

        clock_gettime(CLOCK_REALTIME, &pDataProvider->mNewtime);
        pDataProvider->timerec[2] = calc_time_diff(pDataProvider->mNewtime, pDataProvider->mOldtime);
        pDataProvider->mOldtime = pDataProvider->mNewtime;
    }
    pthread_exit(NULL);
    return NULL;
}

处理输入到BesRecord的数据mPcmReadBuf,最后将处理好的数据保存在mProcessedDataBuf

void AudioALSACaptureDataProviderBase::provideCaptureDataToAllClients(const uint32_t open_index)
{
    AudioALSACaptureDataClient *pCaptureDataClient = NULL;
    WritePcmDumpData();
    for (size_t i = 0; i < mCaptureDataClientVector.size(); i++)
    {
        pCaptureDataClient = mCaptureDataClientVector[i];
        pCaptureDataClient->copyCaptureDataToClient(mPcmReadBuf);
    }
}
// Copy one captured PCM chunk from the provider into this client, running it
// through (optional) SRC, BesRecord pre-processing, native effects and
// channel remixing. Data flow:
//   pcm_read_buf >> mRawDataBuf >> (SRC / BesRecord) >> mSrcDataBuf
//   >> (native effect / channel remix) >> mProcessedDataBuf
// fix: every `new char[...]` temporary in the original was leaked on each
// capture period; matching delete[] calls are added on all paths.
uint32_t AudioALSACaptureDataClient::copyCaptureDataToClient(RingBuf pcm_read_buf)
{
    uint32_t dataSize = RingBuf_getDataCount(&pcm_read_buf);
    RingBuf_copyFromRingBuf(&mRawDataBuf, &pcm_read_buf, dataSize);//pcm_read_buf>>mRawDataBuf

    // SRC
    uint32_t kNumRawData = RingBuf_getDataCount(&mRawDataBuf);
    uint32_t num_free_space = RingBuf_getFreeSpace(&mSrcDataBuf);

    //BesRecord PreProcess effect
    if (((mStreamAttributeTarget->BesRecord_Info.besrecord_enable) && !mBypassBesRecord))
    {
        char *pRawDataLinearBuf = new char[kNumRawData];
        RingBuf_copyToLinear(pRawDataLinearBuf, &mRawDataBuf, kNumRawData);//mRawDataBuf>>pRawDataLinearBuf
        if (mStreamAttributeSource->audio_format != AUDIO_FORMAT_PCM_16_BIT)
        {
            // BesRecord operates on 16-bit PCM; convert in place.
            kNumRawData = TransferFormat(pRawDataLinearBuf, mStreamAttributeSource->audio_format,
                                         AUDIO_FORMAT_PCM_16_BIT, kNumRawData);
        }
        uint32_t ProcesseddataSize = kNumRawData;
        uint32_t SRC1outputLength = kNumRawData * mBesRecSRCSizeFactor;
        char *pSRC1DataLinearBuf = new char[SRC1outputLength];

        char *p_read = pRawDataLinearBuf;
        uint32_t num_raw_data_left = kNumRawData;
        uint32_t num_converted_data = SRC1outputLength;
        uint32_t consumed = num_raw_data_left;

        // SRC needed when the device stream format differs from what BesRecord expects.
        if (mNeedBesRecordSRC && (mBliSrcHandler1 != 0))
        {
            mBliSrcHandler1->process((int16_t *)p_read, &num_raw_data_left,
                            (int16_t *)pSRC1DataLinearBuf, &num_converted_data);
            consumed -= num_raw_data_left;
            p_read += consumed;
            ProcesseddataSize = BesRecordPreprocess(pSRC1DataLinearBuf, num_converted_data);

            //transform data format back to StreamAttribue needed after BesRecord process
            if (mBliSrcHandler2 != 0)
            {
                uint32_t SRC2outputLength = ProcesseddataSize * mBesRecSRCSizeFactor2;
                char *pSRC2DataLinearBuf = new char[SRC2outputLength];

                p_read = pSRC1DataLinearBuf;
                num_raw_data_left = ProcesseddataSize;
                consumed = ProcesseddataSize;
                mBliSrcHandler2->process((int16_t *)p_read, &num_raw_data_left,
                            (int16_t *)pSRC2DataLinearBuf, &SRC2outputLength);
                consumed -= num_raw_data_left;
                p_read += consumed;
                // NOTE(review): as in the original code, the SRC2 output is never
                // copied into mSrcDataBuf on this path — confirm whether a
                // RingBuf_copyFromLinear(&mSrcDataBuf, pSRC2DataLinearBuf, SRC2outputLength)
                // is missing.
                delete[] pSRC2DataLinearBuf; // fix: leak
            } else
            {
                RingBuf_copyFromLinear(&mSrcDataBuf, pSRC1DataLinearBuf, ProcesseddataSize);
            }
        } else    // device format already matches BesRecord: plain recording
        {
            ProcesseddataSize = BesRecordPreprocess(pRawDataLinearBuf, kNumRawData);
            //transform data format back to StreamAttribue needed after BesRecord processed
            if (mBliSrcHandler2 != 0)
            {
                uint32_t SRC2outputLength = ProcesseddataSize * mBesRecSRCSizeFactor2;
                char *pSRC2DataLinearBuf = new char[SRC2outputLength];

                p_read = pRawDataLinearBuf;
                num_raw_data_left = ProcesseddataSize;
                consumed = ProcesseddataSize;
                mBliSrcHandler2->process((int16_t *)p_read, &num_raw_data_left,
                            (int16_t *)pSRC2DataLinearBuf, &SRC2outputLength);
                consumed -= num_raw_data_left;
                p_read += consumed;
                // NOTE(review): SRC2 output not copied into mSrcDataBuf here either
                // (matches the original) — confirm upstream.
                delete[] pSRC2DataLinearBuf; // fix: leak
            } else // plain recording: pRawDataLinearBuf>>mSrcDataBuf
            {
                RingBuf_copyFromLinear(&mSrcDataBuf, pRawDataLinearBuf, ProcesseddataSize);
            }
        }
        delete[] pSRC1DataLinearBuf; // fix: leak
        delete[] pRawDataLinearBuf;  // fix: leak
    } else    //no need to do BesRecord PreProcess, transform data to mStreamAttributeTarget format
    {
        if (mBliSrc == NULL) // No need SRC
        {
            if (mStreamAttributeTarget->audio_format != mStreamAttributeSource->audio_format)
            {
                char *pRawDataLinearBuf = new char[kNumRawData];
                RingBuf_copyToLinear(pRawDataLinearBuf, &mRawDataBuf, kNumRawData);
                kNumRawData = TransferFormat(pRawDataLinearBuf, mStreamAttributeSource->audio_format,
                                             mStreamAttributeTarget->audio_format, kNumRawData);
                RingBuf_copyFromLinear(&mSrcDataBuf, pRawDataLinearBuf, kNumRawData);
                delete[] pRawDataLinearBuf; // fix: leak
            } else
            {
                RingBuf_copyFromRingBuf(&mSrcDataBuf, &mRawDataBuf, kNumRawData);
            }
        } else // Need SRC
        {
            char *pRawDataLinearBuf = new char[kNumRawData];
            RingBuf_copyToLinear(pRawDataLinearBuf, &mRawDataBuf, kNumRawData);

            char *pSrcDataLinearBuf = new char[num_free_space];
            char *p_read = pRawDataLinearBuf;
            uint32_t num_raw_data_left = kNumRawData;
            uint32_t num_converted_data = num_free_space; // max convert num_free_space
            uint32_t consumed = num_raw_data_left;

            if (mStreamAttributeTarget->audio_format == AUDIO_FORMAT_PCM_16_BIT)
            {
                mBliSrc->process((int16_t *)p_read, &num_raw_data_left,
                                (int16_t *)pSrcDataLinearBuf, &num_converted_data);
            } else
            {
                mBliSrc->process((int32_t *)p_read, &num_raw_data_left,
                                (int32_t *)pSrcDataLinearBuf, &num_converted_data);
            }

            consumed -= num_raw_data_left;
            p_read += consumed;
            RingBuf_copyFromLinear(&mSrcDataBuf, pSrcDataLinearBuf, num_converted_data);
            delete[] pSrcDataLinearBuf; // fix: leak
            delete[] pRawDataLinearBuf; // fix: leak
        }
    }

    dataSize = RingBuf_getDataCount(&mSrcDataBuf);
    uint32_t ProcessdataSize = dataSize;

    //android native effect, use the same sample rate as mStreamAttributeTarget
    if ((mAudioPreProcessEffect->num_preprocessors > 0) && (IsVoIPEnable() == false))
    {
        // fix: allocate 2x so an in-place CHANNEL_MONO_TO_STEREO remix cannot
        // overrun the buffer (ApplyChannelRemix may double the data size).
        char *pSrcDataLinearBuf = new char[dataSize * 2];
        uint32_t native_processed_byte = 0;
        RingBuf_copyToLinear(pSrcDataLinearBuf, &mSrcDataBuf, dataSize);
        if (IsNeedChannelRemix()) {
            ProcessdataSize = ApplyChannelRemix((short *)pSrcDataLinearBuf, dataSize);
        }
        native_processed_byte = NativePreprocess(pSrcDataLinearBuf, ProcessdataSize);
        RingBuf_copyFromLinear(&mProcessedDataBuf, pSrcDataLinearBuf, native_processed_byte);
        delete[] pSrcDataLinearBuf; // fix: leak
    } else    //no need to do native effect, copy data from mSrcDataBuf to mProcessedDataBuf directly
    {
        if (IsNeedChannelRemix()) // e.g. sound recorder: remix while copying
        {
            ApplyChannelRemixWithRingBuf(&mSrcDataBuf, &mProcessedDataBuf);
        } else // e.g. camcorder: layouts match, straight copy
        {
            RingBuf_copyFromRingBuf(&mProcessedDataBuf, &mSrcDataBuf, dataSize);
        }
    }
    return 0;
}

使用BesRecord进行音频数据的预处理,会区分普通录音和VoIP,VoIP会进行回音消除。

uint32_t AudioALSACaptureDataClient::BesRecordPreprocess(void *buffer , uint32_t bytes)
{
    struct InBufferInfo InBufinfo = {0};
    uint32_t retSize = bytes;
    if (!mBypassBesRecord)
    {
        InBufinfo.pBufBase = (short *)buffer;
        InBufinfo.BufLen = bytes;
        InBufinfo.time_stamp_queued = GetSystemTime(false);
        InBufinfo.bHasRemainInfo = true;
        InBufinfo.time_stamp_predict = GetCaptureTimeStamp();

        retSize = mSPELayer->Process(&InBufinfo);//具体的处理函数
    }
    return retSize;
}

进行声道转换,输入设备提供的是双声道的数据,而录音机需要的是单声道的数据。

// Drain srcBuffer, apply the configured channel remix (mChannelRemixOp) in a
// temporary linear buffer, and append the result to dstBuffer. Returns the
// number of remixed bytes written.
// NOTE(review): dstBuffer free space is not checked before the final copy —
// matches the original; confirm the caller sizes mProcessedDataBuf adequately.
ssize_t AudioALSACaptureDataClient::ApplyChannelRemixWithRingBuf(RingBuf *srcBuffer, RingBuf *dstBuffer)
{
    ssize_t remixSize = 0;
    size_t dataSize = RingBuf_getDataCount(srcBuffer);
    size_t dataSizeAfterProcess;
    char *tempBuffer = NULL;
    size_t tempBufferSize;

    // Predict the post-remix size so the scratch buffer can hold whichever
    // of input/output is larger (mono->stereo grows, stereo->mono shrinks).
    if (mChannelRemixOp == CHANNEL_MONO_TO_STEREO) {
        dataSizeAfterProcess = dataSize << 1;
    } else if (mChannelRemixOp == CHANNEL_STEREO_DOWNMIX ||
               mChannelRemixOp == CHANNEL_STEREO_DOWNMIX_L_ONLY ||
               mChannelRemixOp == CHANNEL_STEREO_DOWNMIX_R_ONLY) {
        dataSizeAfterProcess = dataSize >> 1; // stereo -> mono halves the data
    } else {
        dataSizeAfterProcess = dataSize;
    }

    tempBufferSize = (dataSizeAfterProcess > dataSize) ? dataSizeAfterProcess : dataSize;
    tempBuffer = new char[tempBufferSize];

    // Remix in the linear scratch buffer, then push the result downstream.
    RingBuf_copyToLinear(tempBuffer, srcBuffer, dataSize);
    remixSize = ApplyChannelRemix((short *)tempBuffer, dataSize);
    RingBuf_copyFromLinear(dstBuffer, tempBuffer, remixSize);
    delete[] tempBuffer; // fix: tempBuffer was leaked on every call

    return remixSize;
}

根据不同的目标声道数将双声道数据进行转换

// In-place channel remix of 16-bit PCM according to mChannelRemixOp.
// `bytes` is the input size in bytes; the return value is the output size
// in bytes (halved for stereo->mono, doubled for mono->stereo, unchanged
// for crossmix, 0 for an unrecognized op).
ssize_t AudioALSACaptureDataClient::ApplyChannelRemix(short *buffer, size_t bytes)
{
    const uint32_t op = mChannelRemixOp;
    ssize_t outBytes = 0;

    if (op == CHANNEL_STEREO_CROSSMIX_L2R)
    {
        // Duplicate the left sample into the right slot of each frame.
        const int frames = bytes >> 2; // 4 bytes per stereo 16-bit frame
        for (int i = 0; i < frames; i++) {
            buffer[2 * i + 1] = buffer[2 * i];
        }
        outBytes = bytes;
    }
    else if (op == CHANNEL_STEREO_CROSSMIX_R2L)
    {
        // Duplicate the right sample into the left slot of each frame.
        const int frames = bytes >> 2;
        for (int i = 0; i < frames; i++) {
            buffer[2 * i] = buffer[2 * i + 1];
        }
        outBytes = bytes;
    }
    else if (op == CHANNEL_STEREO_DOWNMIX)
    {
        // Average L and R into a compacted mono stream at the buffer head.
        const int frames = bytes >> 2;
        for (int i = 0; i < frames; i++) {
            buffer[i] = (short)((buffer[2 * i] + buffer[2 * i + 1]) >> 1);
        }
        outBytes = bytes >> 1;
    }
    else if (op == CHANNEL_STEREO_DOWNMIX_L_ONLY) // sound recorder: keep L channel
    {
        const int frames = bytes >> 2;
        for (int i = 0; i < frames; i++) {
            buffer[i] = buffer[2 * i];
        }
        outBytes = bytes >> 1; // half the data after downmix
    }
    else if (op == CHANNEL_STEREO_DOWNMIX_R_ONLY)
    {
        const int frames = bytes >> 2;
        for (int i = 0; i < frames; i++) {
            buffer[i] = buffer[2 * i + 1];
        }
        outBytes = bytes >> 1;
    }
    else if (op == CHANNEL_MONO_TO_STEREO)
    {
        // Expand in place from the end backwards so mono samples are not
        // overwritten before they are read (2*i >= i for all i).
        const int frames = bytes >> 1; // 2 bytes per mono sample
        for (int i = frames - 1; i >= 0; i--) {
            const short sample = buffer[i];
            buffer[2 * i] = sample;
            buffer[2 * i + 1] = sample;
        }
        outBytes = bytes << 1;
    }

    return outBytes;
}

打开输入设备处理数据之后,接着会读取数据

// Pull `bytes` of processed capture data from the data client into the
// caller's buffer, then refresh the Voice UI / Voice Unlock uplink
// reference timestamp.
ssize_t AudioALSACaptureHandlerNormal::read(void *buffer, ssize_t bytes)
{
    // NOTE(review): the client's return value is discarded and the requested
    // byte count is reported back unconditionally — confirm short reads
    // cannot occur on this path.
    mCaptureDataClient->read(buffer, bytes);

    //============Voice UI&Unlock REFERECE=============
    AudioVUnlockDL *voiceUnlock = AudioVUnlockDL::getInstance();
    if (voiceUnlock != NULL)
    {
        struct timespec zeroTime;
        memset(&zeroTime, 0, sizeof(timespec));
        voiceUnlock->SetUplinkStartTime(zeroTime, 0);
    }
    //===========================================

    return bytes;
}

客户端从mProcessedDataBuf读取处理好的数据,每次读取ReadDataBytes字节

// Copies `bytes` bytes of processed capture data from mProcessedDataBuf into
// `buffer`, waiting (up to 8 retries of 300 ms each) for the provider thread
// to refill the ring buffer. Returns the number of bytes actually copied,
// which may be less than `bytes` on timeout.
// NOTE(review): mLock is unlocked in this function but never visibly locked
// here — presumably CheckNativeEffect() or a caller acquires it; confirm the
// locking protocol before changing this flow.
ssize_t AudioALSACaptureDataClient::read(void *buffer, ssize_t bytes)
{
    char *pWrite = (char *)buffer;
    char *pStart = (char *)buffer;
    uint32_t RingBufferSize = 0;
    uint32_t ReadDataBytes = bytes;   // bytes still outstanding

    int TryCount = 8;
    do
    {
        CheckNativeEffect();    //add here for alsaStreamIn lock holding
        CheckDynamicSpeechMask();
        // Discard leading distorted data after (re)start, if any remains.
        // The drop reads reuse the caller's buffer as scratch space; dropped
        // bytes are overwritten by the real copy further below.
        if (dropBesRecordDataSize > 0)
        {
            /* Drop distortion data */
            RingBufferSize = RingBuf_getDataCount(&mProcessedDataBuf);
            if (RingBufferSize >= dropBesRecordDataSize)
            {
                // Drop dropBesRecordDataSize bytes from RingBuffer
                while(dropBesRecordDataSize > 0)
                {
                    // Never drop more than ReadDataBytes at once — that is the
                    // size of the scratch area available in the caller's buffer.
                    uint32_t dropSize = dropBesRecordDataSize > ReadDataBytes ? 
                                        ReadDataBytes : dropBesRecordDataSize;
                    RingBuf_copyToLinear((char *)pWrite, &mProcessedDataBuf, dropSize);
                    dropBesRecordDataSize -= dropSize;
                }
            }
            else
            {
                // Drop RingBufferSize from RingBuffer
                while(RingBufferSize > 0 && dropBesRecordDataSize > 0)
                {
                    uint32_t dropSize = dropBesRecordDataSize > ReadDataBytes ? 
                                        ReadDataBytes : dropBesRecordDataSize;
                    // Also clamp to what the ring buffer actually holds.
                    dropSize = dropSize > RingBufferSize ? RingBufferSize : dropSize;
                    RingBuf_copyToLinear((char *)pWrite, &mProcessedDataBuf, dropSize);
                    RingBufferSize -= dropSize;
                    dropBesRecordDataSize -= dropSize;
                }
            }
        }

        // Normal copy path: only once all pending drop data has been consumed.
        if (dropBesRecordDataSize == 0)
        {
            RingBufferSize = RingBuf_getDataCount(&mProcessedDataBuf);
            if (RingBufferSize >= ReadDataBytes) // ring buffer is enough, copy & exit
            {
                RingBuf_copyToLinear((char *)pWrite, &mProcessedDataBuf, ReadDataBytes);
                ReadDataBytes = 0;
                mLock.unlock();
                break;
            }
            else // ring buffer is not enough, copy all data
            {
                RingBuf_copyToLinear((char *)pWrite, &mProcessedDataBuf, RingBufferSize);
                ReadDataBytes -= RingBufferSize;
                pWrite += RingBufferSize;
            }
        }

        // wait for new data
        if (mWaitWorkCV.waitRelative(mLock, milliseconds(300)) != NO_ERROR)
        {
            ALOGW("%s(), waitRelative fail", __FUNCTION__);
            mLock.unlock();
            break;
        }
        TryCount--;
    }
    while (ReadDataBytes > 0 && (TryCount != 0 || dropBesRecordDataSize != 0));

    // Apply software gain on the whole caller buffer when configured.
    if (IsNeedApplyVolume())
    {
        ApplyVolume(buffer, bytes);
    }
    return bytes - ReadDataBytes;
}

你可能感兴趣的:(录音机和录像机的输入通路)