"Capturing Microphone Audio with OpenSL ES on the Android Platform (Code Implementation)" - link:
https://edu.csdn.net/learn/38258/606150?spm=1003.2001.3001.4157
"Playing PCM Audio with OpenSL ES on the Android Platform (Code Implementation)" - link:
https://edu.csdn.net/learn/38258/606151?spm=1003.2001.3001.4157
Audio capture and playback are extremely important processes in audio/video technology, and understanding how they work gives us a deeper grasp of audio technology as a whole. In earlier articles we covered audio capture and playback on Windows; this article covers audio capture and playback on the Android platform.
OpenSL ES (Open Sound Library for Embedded Systems) is a royalty-free, cross-platform, hardware-accelerated C-language audio API for embedded systems. Being cross-platform, it makes audio code easier to port. The Android implementation of OpenSL ES inherits most of the functionality of the OpenSL ES reference specification, but there are some differences, so sample code written against the reference specification may need changes before it works correctly on Android. On Android, OpenSL ES provides audio functionality similar to the MediaRecorder and MediaPlayer APIs in the Android Java framework, and its API can be called directly from C/C++ in the Android NDK for audio development.
The Android NDK documentation and demo for developing audio projects with OpenSL ES can be found at: OpenSL ES | Android NDK | Android Developers
Developing with OpenSL ES requires understanding two concepts: objects and interfaces. An OpenSL ES object is similar to the object concept in languages such as Java and C++, except that an OpenSL ES object can only be accessed through its associated interfaces: for each capability of the object you must first obtain the corresponding interface. During capture or playback the flow is: create the object, realize it (Realize), obtain the functional interfaces via the object's GetInterface, configure parameters through those interfaces, and finally destroy the object with Destroy when you are done.
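In code, this create → Realize → GetInterface → use → Destroy lifecycle looks roughly like the sketch below, using the engine object as the example and with error handling reduced to the essentials:
SLObjectItf engineObject = NULL; // every OpenSL ES entity is first obtained as an SLObjectItf
SLEngineItf engineItf = NULL;    // functional interfaces are obtained from the realized object
SLresult r = slCreateEngine(&engineObject, 0, NULL, 0, NULL, NULL); // 1. create the object
if (r == SL_RESULT_SUCCESS)
r = (*engineObject)->Realize(engineObject, SL_BOOLEAN_FALSE);       // 2. realize it (synchronously)
if (r == SL_RESULT_SUCCESS)
r = (*engineObject)->GetInterface(engineObject, SL_IID_ENGINE, &engineItf); // 3. get a functional interface
/* 4. use engineItf to create recorders/players and configure them */
(*engineObject)->Destroy(engineObject); // 5. destroy the object when finished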
To use OpenSL ES on Android, the following headers must be included:
#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_Android.h>
The recording permission must be added to the AndroidManifest.xml configuration file as follows (on Android 6.0 and later it must also be requested from the user at runtime):
<uses-permission android:name="android.permission.RECORD_AUDIO" />
When building with CMake, OpenSLES must be added to the link libraries in CMakeLists.txt, as follows:
target_link_libraries(
# your native library target name goes here
OpenSLES
)
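For reference, a fuller CMakeLists.txt fragment might look like the sketch below; the target name native-audio and the source file names are placeholders for illustration only, and liblog is linked because the code below uses the LOGI/LOGE logging macros:
add_library(native-audio SHARED audio_capture.cpp audio_play.cpp) # hypothetical target and sources
find_library(log-lib log)      # Android log library backing LOGI/LOGE
target_link_libraries(native-audio
OpenSLES                       # OpenSL ES library shipped with the NDK
${log-lib})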
1. Initializing the capture engine
static int createAudioCaptureEngine()
{
SLEngineOption pEngineOptions[] = {(SLuint32) SL_ENGINEOPTION_THREADSAFE,
(SLuint32) SL_BOOLEAN_TRUE};
// Create the engine object
SLresult ret;
ret = slCreateEngine(
&m_audioCaptureMng.engineObject, //receives the engine object
1, //number of engine options
pEngineOptions, //engine options (thread-safe mode enabled above)
0, //number of interfaces to request
NULL, //interface IDs
NULL //interface required flags
);
if(SL_RESULT_SUCCESS != ret)
{
LOGE("audio slCreateEngine is error %d\n",ret);
return -1;
}
//Realize the object (SL_BOOLEAN_FALSE means synchronous realization)
ret = (*m_audioCaptureMng.engineObject)->Realize(m_audioCaptureMng.engineObject, SL_BOOLEAN_FALSE);
if(SL_RESULT_SUCCESS != ret)
{
LOGE("audio Realize is error %d\n",ret);
return -1;
}
//Obtain the engine interface from the engine object
ret = (*m_audioCaptureMng.engineObject)->GetInterface(m_audioCaptureMng.engineObject, SL_IID_ENGINE, &m_audioCaptureMng.engineInterface);
if(SL_RESULT_SUCCESS != ret)
{
LOGE("audio GetInterface is error %d\n",ret);
return -1;
}
return 0;
}
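Note that the capture code in this and the following sections refers to a global manager m_audioCaptureMng and a buffer-count constant NUM_BUFFER_QUEUE whose definitions are not shown in this article. Their exact layout belongs to the full project; a rough sketch of what the code assumes is:
#define NUM_BUFFER_QUEUE 2 // assumed buffer-queue depth; the real project defines its own value
typedef struct
{
int curChn;            // channel count actually in use (1 or 2)
int curFrameLenPerChn; // samples per channel per capture frame
} MicCfgInfo;
typedef struct
{
SLObjectItf engineObject;                          // engine object
SLEngineItf engineInterface;                       // engine interface
SLObjectItf recorderObject;                        // audio recorder object
SLRecordItf audioRecord;                           // record interface (start/stop recording)
SLAndroidSimpleBufferQueueItf recorderBufferQueue; // buffer-queue interface
char *recorderBuffer;                              // buffer handed to Enqueue
int recorderSize;                                  // size of recorderBuffer in bytes
MicCfgInfo micCfgInfo;                             // negotiated capture parameters
int audioCaptureSatues;                            // 1 = capturing, 0 = stopped (spelling kept from the source)
} AudioCaptureMng;
static AudioCaptureMng m_audioCaptureMng = {0};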
2. Configuring audio capture
static int initAudioCapture(int micId, int sample, int chn,int frameLen)
{
int ckSample = checkSampleRate(sample);
m_audioCaptureMng.micCfgInfo.curChn = chn > 2 ? 2:(chn < 1 ? 1 : chn);
m_audioCaptureMng.micCfgInfo.curFrameLenPerChn = frameLen > 2048 ? 2048 : (frameLen < 960 ? 960 : frameLen);
int chnCfg = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;//stereo
if ( m_audioCaptureMng.micCfgInfo.curChn == 1)//mono
{
chnCfg = SL_SPEAKER_FRONT_CENTER;
}
SLDataLocator_IODevice ioDevice =
{
SL_DATALOCATOR_IODEVICE, //locator type: I/O device (the phone's built-in microphone)
SL_IODEVICE_AUDIOINPUT, //device type: audio input
SL_DEFAULTDEVICEID_AUDIOINPUT, //device ID: default audio input
NULL //device instance
};
// Input: SLDataSource describes where the audio data comes from
SLDataSource capSource =
{
&ioDevice, //data locator: the I/O device configured above
NULL //data format: not needed on the capture input side
};
// Data sink locator: an Android simple buffer queue that receives the captured buffers
SLDataLocator_AndroidSimpleBufferQueue capBufferQueue = {
SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, //locator type: buffer queue
NUM_BUFFER_QUEUE //number of buffers in the queue
};
// PCM format of the captured data
SLDataFormat_PCM pcmCfg = {
SL_DATAFORMAT_PCM, //output PCM-format data
(SLuint32)m_audioCaptureMng.micCfgInfo.curChn, //number of channels
(SLuint32)ckSample, //sampling rate (in milli-hertz, see checkSampleRate)
SL_PCMSAMPLEFORMAT_FIXED_16, //bits per sample: 16-bit
SL_PCMSAMPLEFORMAT_FIXED_16, //container size, matching the sample format
(SLuint32)chnCfg, //channel mask
SL_BYTEORDER_LITTLEENDIAN //byte order of the PCM data
};
// Output: SLDataSink describes where the audio data goes
SLDataSink dataSink = {
&capBufferQueue, //data locator: the buffer queue configured above
&pcmCfg //data format of the output
};
//Create the recorder object and request the SL_IID_ANDROIDSIMPLEBUFFERQUEUE interface on it
SLInterfaceID iids[2] = {SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
SL_IID_ANDROIDCONFIGURATION};
SLboolean required[2] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE};
// Create the audio recorder object
int ret = (*m_audioCaptureMng.engineInterface)->CreateAudioRecorder(m_audioCaptureMng.engineInterface, //engine interface
&m_audioCaptureMng.recorderObject, //receives the recorder object
&capSource, //input (data source) configuration
&dataSink, //output (data sink) configuration
1, //number of interfaces requested (only the first entry, the buffer queue, is actually requested here)
iids, //interface IDs to request
required //whether each requested interface is required
);
if(SL_RESULT_SUCCESS != ret)
{
LOGE("audio CreateAudioRecorder is error %d\n",ret);
return -1;
}
//Realize the recorder object
ret = (*m_audioCaptureMng.recorderObject)->Realize(m_audioCaptureMng.recorderObject, SL_BOOLEAN_FALSE);
if(SL_RESULT_SUCCESS != ret)
{
LOGE("audio Realize is error %d\n",ret);
return -1;
}
//Obtain the buffer-queue interface
ret = (*m_audioCaptureMng.recorderObject)->GetInterface(m_audioCaptureMng.recorderObject, SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
(void *) &m_audioCaptureMng.recorderBufferQueue);
if(SL_RESULT_SUCCESS != ret)
{
LOGE("audio GetInterface is error %d\n",ret);
return -1;
}
//Obtain the record interface
ret = (*m_audioCaptureMng.recorderObject)->GetInterface(m_audioCaptureMng.recorderObject, SL_IID_RECORD, &m_audioCaptureMng.audioRecord);
if(SL_RESULT_SUCCESS != ret)
{
LOGE("audio GetInterface is error %d\n",ret);
return -1;
}
// Register the buffer-queue callback; the callback is where the captured PCM data is picked up and queued for use
ret = (*m_audioCaptureMng.recorderBufferQueue)->RegisterCallback(m_audioCaptureMng.recorderBufferQueue, AudioRecordCallback,NULL);
if(SL_RESULT_SUCCESS != ret)
{
LOGE("audio RegisterCallback is error %d\n",ret);
return -1;
}
//Set the recorder to the recording state (SL_RECORDSTATE_RECORDING)
ret = (*m_audioCaptureMng.audioRecord)->SetRecordState(m_audioCaptureMng.audioRecord, SL_RECORDSTATE_RECORDING);
if(SL_RESULT_SUCCESS != ret)
{
LOGE("audio SetRecordState is error %d\n",ret);
return -1;
}
m_audioCaptureMng.recorderSize = m_audioCaptureMng.micCfgInfo.curChn*m_audioCaptureMng.micCfgInfo.curFrameLenPerChn * sizeof(short);
m_audioCaptureMng.recorderBuffer = new char [m_audioCaptureMng.recorderSize];
//After setting the record state, an initial Enqueue is required; only then do the capture callbacks start firing
ret = (*m_audioCaptureMng.recorderBufferQueue)->Enqueue(m_audioCaptureMng.recorderBufferQueue, m_audioCaptureMng.recorderBuffer, m_audioCaptureMng.recorderSize);
if(SL_RESULT_SUCCESS != ret)
{
LOGE("audio Enqueue is error %d\n",ret);
return -1;
}
LOGI("init Audio recording succeed!(%d,%d,%d,recorderSize %d)~~~~~\n",sample, chn, frameLen,m_audioCaptureMng.recorderSize);
m_audioCaptureMng.audioCaptureSatues = 1;
return 0;
}
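Two helpers referenced above, checkSampleRate and AudioRecordCallback, are not shown in this article. A minimal sketch of what they might look like follows; onPcmCaptured is a hypothetical consumer and the sample-rate table only covers common rates, so both should be adapted to the real project:
// Maps a plain Hz value to the milli-hertz SL_SAMPLINGRATE_* constants expected by SLDataFormat_PCM
static int checkSampleRate(int sample)
{
switch (sample)
{
case 8000: return SL_SAMPLINGRATE_8;
case 16000: return SL_SAMPLINGRATE_16;
case 22050: return SL_SAMPLINGRATE_22_05;
case 32000: return SL_SAMPLINGRATE_32;
case 44100: return SL_SAMPLINGRATE_44_1;
case 48000: return SL_SAMPLINGRATE_48;
default: return SL_SAMPLINGRATE_44_1; // fall back to 44.1 kHz for unsupported values
}
}
// Hypothetical consumer of the captured PCM; replace with the project's own queue/encoder/file writer
static void onPcmCaptured(const char *pcm, int len)
{
(void)pcm; (void)len; // e.g. push the frame into a FIFO consumed by an encoder
}
// Runs on an OpenSL ES thread each time the enqueued buffer has been filled with microphone data
static void AudioRecordCallback(SLAndroidSimpleBufferQueueItf bq, void *context)
{
(void)context;
// recorderBuffer now holds recorderSize bytes of 16-bit PCM
onPcmCaptured(m_audioCaptureMng.recorderBuffer, m_audioCaptureMng.recorderSize);
// Re-enqueue the buffer so capture continues; without this the callback stops firing
(*bq)->Enqueue(bq, m_audioCaptureMng.recorderBuffer, m_audioCaptureMng.recorderSize);
}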
3. Stopping audio capture and releasing resources
//Stop audio capture
int stopAudioCapatureDev()
{
if (0 == m_audioCaptureMng.audioCaptureSatues)
{
return 0;
}
SLresult ret = 0;
// Stop recording
if (m_audioCaptureMng.audioRecord != NULL)
{
//Set the recorder to the stopped state (SL_RECORDSTATE_STOPPED)
ret = (*m_audioCaptureMng.audioRecord)->SetRecordState(m_audioCaptureMng.audioRecord, SL_RECORDSTATE_STOPPED);
if(SL_RESULT_SUCCESS != ret)
{
LOGE("audio SetRecordState is error %d\n",ret);
return -1;
}
LOGI("stop Record done\n");
}
// Release the recorder resources
if (m_audioCaptureMng.recorderObject != NULL)
{
(*m_audioCaptureMng.recorderObject)->Destroy(m_audioCaptureMng.recorderObject);
m_audioCaptureMng.recorderObject = NULL;
m_audioCaptureMng.audioRecord = NULL;
m_audioCaptureMng.recorderBufferQueue = NULL;
}
// Release the engine object's resources
if (m_audioCaptureMng.engineObject != NULL) {
(*m_audioCaptureMng.engineObject)->Destroy(m_audioCaptureMng.engineObject);
m_audioCaptureMng.engineObject = NULL;
m_audioCaptureMng.engineInterface = NULL;
}
if (m_audioCaptureMng.recorderBuffer)
{
delete [] m_audioCaptureMng.recorderBuffer;
m_audioCaptureMng.recorderBuffer = NULL;
}
m_audioCaptureMng.audioCaptureSatues = 0;
return 0;
}
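Putting the capture pieces together, a typical calling sequence might look like the sketch below (44.1 kHz mono with a 1024-sample frame as an example; the micId argument is unused in the code shown above, so 0 is passed as a placeholder):
if (createAudioCaptureEngine() == 0 &&
initAudioCapture(0 /*micId*/, 44100, 1, 1024) == 0)
{
// PCM frames now arrive in AudioRecordCallback on an OpenSL ES thread
// ... capture for as long as needed ...
stopAudioCapatureDev(); // stop recording and free the recorder, engine and buffer
}
The playback side below follows the same object/interface pattern.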
1. Creating the audio playback engine
static int createAudioPlayEngine()
{
SLEngineOption engineOptions[] = {(SLuint32) SL_ENGINEOPTION_THREADSAFE,
(SLuint32) SL_BOOLEAN_TRUE};
// Create the engine object
SLresult ret;
ret = slCreateEngine(
&m_audioPlayMng.engineObject, //receives the engine object
1, //number of engine options
engineOptions, //engine options (thread-safe mode enabled above)
0, //number of interfaces to request
NULL, //interface IDs
NULL //interface required flags
);
if(SL_RESULT_SUCCESS != ret)
{
LOGE("audio slCreateEngine is error %d\n",ret);
return -1;
}
//Realize the object; SL_BOOLEAN_FALSE means no asynchronous realization, i.e. it is done synchronously
ret = (*m_audioPlayMng.engineObject)->Realize(m_audioPlayMng.engineObject, SL_BOOLEAN_FALSE);
if(SL_RESULT_SUCCESS != ret)
{
LOGE("audio Realize is error %d\n",ret);
return -1;
}
//Obtain the engine interface from the engine object
ret = (*m_audioPlayMng.engineObject)->GetInterface(m_audioPlayMng.engineObject, SL_IID_ENGINE, &m_audioPlayMng.engineInterface);
if(SL_RESULT_SUCCESS != ret)
{
LOGE("audio GetInterface is error %d\n",ret);
return -1;
}
return 0;
}
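Like the capture code, the playback code relies on a global manager m_audioPlayMng whose definition is not shown here. The fields the code assumes are roughly as follows (reusing the NUM_BUFFER_QUEUE constant from the capture sketch):
typedef struct
{
SLObjectItf engineObject;                      // engine object
SLEngineItf engineInterface;                   // engine interface
SLObjectItf outputMixObject;                   // output mix object
SLObjectItf auidoPlay;                         // audio player object (spelling kept from the source)
SLPlayItf playItf;                             // play interface (play/stop)
SLAndroidSimpleBufferQueueItf playBufferQueue; // buffer-queue interface
char *playBuff[NUM_BUFFER_QUEUE];              // PCM buffers rotated through the queue, allocated elsewhere
int bInit;                                     // 1 = playback initialized
} AudioPlayMng;
static AudioPlayMng m_audioPlayMng = {0};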
2. Creating the output mix
static int createAudioOutPutMix()
{
int ret = (*m_audioPlayMng.engineInterface)->CreateOutputMix(m_audioPlayMng.engineInterface, //engine interface
&m_audioPlayMng.outputMixObject, //receives the output mix object
0, //number of interfaces to request
0, //interface IDs (none)
0 //interface required flags (none)
);
if(SL_RESULT_SUCCESS != ret)
{
LOGE("audio CreateOutputMix is error %d\n",ret);
return -1;
}
//Realize the output mix object
ret = (*m_audioPlayMng.outputMixObject)->Realize(m_audioPlayMng.outputMixObject, SL_BOOLEAN_FALSE);
if(SL_RESULT_SUCCESS != ret)
{
LOGE("audio Realize is error %d\n",ret);
return -1;
}
return 0;
}
3. Configuring audio playback
static int initAudioPlay(int sample, int chn)
{
int slSample = checkSampleRate(sample);
int curChn = chn > 2 ? 2:(chn < 1 ? 1 : chn);
//int curFrameLenPerChn = frameLen > 2048 ? 2048 : (frameLen < 960 ? 960 : frameLen);
int chnCfg = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
if (curChn == 1)
{
chnCfg = SL_SPEAKER_FRONT_CENTER;
}
// Data source locator: an Android simple buffer queue that feeds PCM to the player
SLDataLocator_AndroidSimpleBufferQueue playBufferQueue = {
SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, //locator type: buffer queue
NUM_BUFFER_QUEUE //number of buffers in the queue
};
// PCM format of the data that will be played
SLDataFormat_PCM pcmCfg = {
SL_DATAFORMAT_PCM, //PCM-format data
(SLuint32)curChn, //number of channels
(SLuint32)slSample, //sampling rate (in milli-hertz, see checkSampleRate)
SL_PCMSAMPLEFORMAT_FIXED_16, //bits per sample: 16-bit
SL_PCMSAMPLEFORMAT_FIXED_16, //container size, matching the sample format
(SLuint32)chnCfg, //channel mask
SL_BYTEORDER_LITTLEENDIAN //byte order of the PCM data
};
// Input: SLDataSource describes the audio data fed to the player
SLDataSource audioSrc = {
&playBufferQueue, //data locator: the buffer queue configured above
&pcmCfg //data format of the input
};
SLDataLocator_OutputMix outmix = {SL_DATALOCATOR_OUTPUTMIX,m_audioPlayMng.outputMixObject};
SLDataSink audioSink= {&outmix,0};
const SLInterfaceID ids[2] = {SL_IID_BUFFERQUEUE, SL_IID_VOLUME};
const SLboolean req[2] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE};
int ret = (*m_audioPlayMng.engineInterface)->CreateAudioPlayer(m_audioPlayMng.engineInterface,
&m_audioPlayMng.auidoPlay,
&audioSrc, &audioSink,
2, ids, req);
if(SL_RESULT_SUCCESS != ret)
{
LOGE("audio CreateAudioPlayer is error %d\n",ret);
return -1;
}
ret = (*m_audioPlayMng.auidoPlay)->Realize(m_audioPlayMng.auidoPlay,SL_BOOLEAN_FALSE);
if(SL_RESULT_SUCCESS != ret)
{
LOGE("audio Realize is error %d\n",ret);
return -1;
}
ret = (*m_audioPlayMng.auidoPlay)->GetInterface(m_audioPlayMng.auidoPlay,SL_IID_PLAY,&m_audioPlayMng.playItf);
if(SL_RESULT_SUCCESS != ret)
{
LOGE("audio GetInterface is error %d\n",ret);
return -1;
}
ret = (*m_audioPlayMng.auidoPlay)->GetInterface(m_audioPlayMng.auidoPlay,SL_IID_BUFFERQUEUE,&m_audioPlayMng.playBufferQueue);
if(SL_RESULT_SUCCESS != ret)
{
LOGE("audio GetInterface is error %d\n",ret);
return -1;
}
ret = (*m_audioPlayMng.playBufferQueue)->RegisterCallback(m_audioPlayMng.playBufferQueue,AudioPlayCallback,0);
if(SL_RESULT_SUCCESS != ret)
{
LOGE("audio RegisterCallback is error %d\n",ret);
return -1;
}
//Set the player to the playing state
ret = (*m_audioPlayMng.playItf)->SetPlayState(m_audioPlayMng.playItf,SL_PLAYSTATE_PLAYING);
if(SL_RESULT_SUCCESS != ret)
{
LOGE("audio SetPlayState is error %d\n",ret);
return -1;
}
//Prime the queue with a tiny 1-byte buffer so that the buffer-queue callback starts firing
ret = (*m_audioPlayMng.playBufferQueue)->Enqueue(m_audioPlayMng.playBufferQueue,"",1);
if(SL_RESULT_SUCCESS != ret)
{
LOGE("audio Enqueue is error %d\n",ret);
return -1;
}
LOGI("init Audio Play succeed !(%d,%d)~~~~~\n",sample, chn);
return 0;
}
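The AudioPlayCallback registered above is also not shown. A minimal sketch follows, under the assumption that getPcmToPlay is a hypothetical function pulling PCM from the application's own FIFO, that PLAY_BUF_SIZE is the assumed capacity of each playBuff[] buffer, and that those buffers were allocated during initialization:
#include <string.h> // for memset
#define PLAY_BUF_SIZE 4096 // assumed capacity of each playBuff[] buffer; must match the allocation done elsewhere
// Hypothetical source of PCM to play; replace with the project's own FIFO/decoder output
static int getPcmToPlay(char *dst, int maxLen)
{
(void)dst; (void)maxLen;
return 0; // returning 0 means "no data yet", so the callback plays silence
}
static int s_playIdx = 0; // index of the rotating buffer to fill next (illustration only)
// Runs on an OpenSL ES thread whenever the player has finished consuming an enqueued buffer
static void AudioPlayCallback(SLAndroidSimpleBufferQueueItf bq, void *context)
{
(void)context;
char *buf = m_audioPlayMng.playBuff[s_playIdx];
int len = getPcmToPlay(buf, PLAY_BUF_SIZE);
if (len <= 0)
{
memset(buf, 0, PLAY_BUF_SIZE); // underrun: play silence so the queue keeps cycling
len = PLAY_BUF_SIZE;
}
(*bq)->Enqueue(bq, buf, len); // hand the next buffer to the player
s_playIdx = (s_playIdx + 1) % NUM_BUFFER_QUEUE;
}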
4. Releasing playback resources on exit
int audioPlayDeInit()
{
if (0 == m_audioPlayMng.bInit)
{
return 0;
}
int ret = 0;
// Stop playback
if (m_audioPlayMng.playItf != NULL)
{
//Set the player to the stopped state (SL_PLAYSTATE_STOPPED)
ret = (*m_audioPlayMng.playItf)->SetPlayState(m_audioPlayMng.playItf, SL_PLAYSTATE_STOPPED);
if(SL_RESULT_SUCCESS != ret)
{
LOGE("SetPlayState is error %d\n",ret);
return -1;
}
}
// Release the player resources
if (m_audioPlayMng.auidoPlay != NULL)
{
(*m_audioPlayMng.auidoPlay)->Destroy(m_audioPlayMng.auidoPlay);
m_audioPlayMng.auidoPlay = NULL;
m_audioPlayMng.playItf = NULL;
m_audioPlayMng.playBufferQueue = NULL;
}
if (m_audioPlayMng.outputMixObject != NULL)
{
(*m_audioPlayMng.outputMixObject)->Destroy(m_audioPlayMng.outputMixObject);
m_audioPlayMng.outputMixObject = NULL;
}
//Release the engine object's resources
if (m_audioPlayMng.engineObject != NULL) {
(*m_audioPlayMng.engineObject)->Destroy(m_audioPlayMng.engineObject);
m_audioPlayMng.engineObject = NULL;
m_audioPlayMng.engineInterface = NULL;
}
for (size_t i = 0; i < NUM_BUFFER_QUEUE; i++)
{
if (m_audioPlayMng.playBuff[i])
{
delete [] m_audioPlayMng.playBuff[i];
m_audioPlayMng.playBuff[i] = NULL;
}
}
m_audioPlayMng.bInit = 0;
LOGI("release play done\n");
return 0;
}
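A typical playback calling sequence, again only as an illustration (48 kHz stereo here), would be:
if (createAudioPlayEngine() == 0 &&
createAudioOutPutMix() == 0 &&
initAudioPlay(48000, 2) == 0)
{
// AudioPlayCallback now pulls PCM on an OpenSL ES thread;
// feed decoded or captured PCM into whatever FIFO the callback reads from
// ... play for as long as needed ...
audioPlayDeInit(); // stop playback and release the player, output mix and engine
}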