此篇是对《Android音频框架之一 详解audioPolicy流程及HAL驱动加载》 和《Android音频框架之二 用户录音启动流程源码走读》的延续,
此系列博文是记录在Android7.1系统即以后版本实现内录音功能。
当用户使用 AudioRecord 录音 API 创建 new AudioRecord 后,会创建文件流把 AudioRecord 的音频流写入到文件流或网络流中,
调用的方法是 audioRecord.read() 方法,把数据写入到文件流中,源码如下
/**
 * Records the system audio mix via AudioSource.REMOTE_SUBMIX, dumps the raw
 * PCM stream to a file, and converts it to WAV when recording stops.
 */
public class RecorderDefine {
    private static String TAG = "Debug_dump_info:";
    AudioRecord mRecord = null;
    AudioManager mAudioManager = null;
    // volatile: written by the UI thread in StopRecord() and polled by the
    // recorder thread's capture loop; without it the loop may never see the
    // stop request.
    volatile boolean mReqStop = false;
    private static RecorderDefine mRecorderDefine = null;
    Thread mRecorderThread = null;
    private Method getServiceMethod;

    public RecorderDefine() {
        init();
    }

    /** Lazily creates the singleton; synchronized to avoid a creation race. */
    public static synchronized RecorderDefine getInstance() {
        if (mRecorderDefine == null) {
            mRecorderDefine = new RecorderDefine();
        }
        Log.d(TAG, "Create RecorderDefine..");
        return mRecorderDefine;
    }

    /** Starts the capture thread; a no-op if a recording is already running. */
    public void StartRecorder() {
        if (mRecorderThread == null) {
            mRecorderThread = new Thread(new Runnable() {
                @Override
                public void run() {
                    recordAndPlay();
                }
            });
            // Thread.start() may only be called once per Thread instance;
            // calling it on an old, finished thread throws
            // IllegalThreadStateException, so start inside the null-check.
            mRecorderThread.start();
        }
    }

    private final int kSampleRate = 44100;
    private final int kChannelMode = AudioFormat.CHANNEL_IN_STEREO;
    private final int kEncodeFormat = AudioFormat.ENCODING_PCM_16BIT;

    /** Builds the AudioRecord on the REMOTE_SUBMIX source (system-mix loopback). */
    private void init() {
        int minBufferSize = AudioRecord.getMinBufferSize(kSampleRate, kChannelMode,
                kEncodeFormat);
        mRecord = new AudioRecord(MediaRecorder.AudioSource.REMOTE_SUBMIX,
                kSampleRate, kChannelMode, kEncodeFormat, minBufferSize * 2); //> REMOTE_SUBMIX
        Log.d(TAG, "Create AudioRecord ...");
    }

    private final int kFrameSize = 2048;
    private String filePath = "/voice-sub.pcm";
    private String outfile = "/voice-sub.wav";

    /**
     * Capture loop: reads PCM frames from the AudioRecord until StopRecord()
     * is called, writes them to a raw .pcm file, then converts that file to
     * WAV. Runs on the recorder thread.
     */
    private void recordAndPlay() {
        FileOutputStream os = null;
        if (mRecord == null) {
            init(); // allow re-recording after a previous session released mRecord
        }
        mRecord.startRecording();
        byte[] buffer = new byte[kFrameSize];
        int num = 0;
        try {
            os = new FileOutputStream(Environment.getExternalStorageDirectory().getPath() + filePath);
            while (!mReqStop) {
                num = mRecord.read(buffer, 0, kFrameSize); //> pull PCM data from the AudioRecord
                Log.d(TAG, "buffer = " + buffer.toString() + ", num = " + num);
                if (num > 0) {
                    os.write(buffer, 0, num); //> write only the bytes actually read
                }
            }
            Log.d(TAG, "exit loop");
        } catch (IOException e) {
            e.printStackTrace();
            Log.e(TAG, "Dump PCM to file failed");
        } finally {
            // Close in finally so the stream is released even on IOException.
            if (os != null) {
                try {
                    os.close();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        }
        mRecord.stop();
        mRecord.release();
        mRecord = null;
        mReqStop = false;
        mRecorderThread = null; // let StartRecorder() spawn a fresh thread next time
        PcmToWavUtil pcmToWavUtil = new PcmToWavUtil(kSampleRate, 2, kEncodeFormat); //> wrap the raw PCM stream in a WAV header
        pcmToWavUtil.pcmToWav(Environment.getExternalStorageDirectory().getPath() + filePath,
                Environment.getExternalStorageDirectory().getPath() + outfile);
        Log.d(TAG, "out wav format file:" + outfile);
    }

    /** Signals the capture loop to finish; teardown happens on the recorder thread. */
    public void StopRecord() {
        mReqStop = true;
        Log.d(TAG, "onClick StopRecord");
    }
}
此 wav 格式音频流就可以通过 VLC 播放器进行播放验证,下面会分享录制后的文件。
源码:android7.1 版本,硬件平台 RK3288-box。
安卓系统录制时、内核程序流程是什么呢?本篇对录制启动、录制和结束代码进行走读。
上篇中已经走读 AudioRecord init过程,录制的startInput是在前面基础上,也就是说 init 过程已经获取设备相关信息,此部分则是创建设备、并启动 startInput 流。
用户在App中录音时,调用的 JNI 接口如下:
@ frameworks/base/core/jni/android_media_AudioRecord.cpp
// JNI method table mapping the Java-side AudioRecord native_* methods to
// their C++ implementations in this file.
static const JNINativeMethod gMethods[] = {
    // name, signature, funcPtr
    {"native_start", "(II)I", (void *)android_media_AudioRecord_start}, //> JNI entry that starts recording
    {"native_stop", "()V", (void *)android_media_AudioRecord_stop},     //> JNI entry that stops recording
    {"native_setup", "(Ljava/lang/Object;Ljava/lang/Object;[IIIII[ILjava/lang/String;J)I",
        (void *)android_media_AudioRecord_setup},                       //> configures AudioRecord parameters
};  // declaration requires a terminating ';'
接口源码如下:
// Starts the native AudioRecord bound to this Java object: looks up the
// client-side recorder, forwards the sync event and trigger session to
// AudioRecord::start(), and maps the native status to a Java status code.
// Throws IllegalStateException when no native recorder is attached.
static jint
android_media_AudioRecord_start(JNIEnv *env, jobject thiz, jint event, jint triggerSession)
{
    sp<AudioRecord> recorder = getAudioRecord(env, thiz);
    if (recorder == NULL) {
        jniThrowException(env, "java/lang/IllegalStateException", NULL);
        return (jint) AUDIO_JAVA_ERROR;
    }
    return nativeToJavaStatus(recorder->start(
            (AudioSystem::sync_event_t) event, (audio_session_t) triggerSession));
}
// Fetches the native AudioRecord pointer stashed in the Java object's
// nativeRecorderInJavaObj long field and wraps it in a strong pointer.
// sLock is held so the field cannot change while it is being read.
static sp<AudioRecord> getAudioRecord(JNIEnv* env, jobject thiz)
{
    Mutex::Autolock l(sLock);
    jlong handle = env->GetLongField(thiz, javaAudioRecordFields.nativeRecorderInJavaObj);
    return sp<AudioRecord>(reinterpret_cast<AudioRecord*>(handle));
}
获取 lpRecorder 是谁呢? 就是程序在 init 时创建的 Audio client 端实例,通过此实例方法发送 同步事件到 系统 AudioRecord 服务,系统服务器启动录音。
调用的 start() 方法,就是 AudioRecord::start() 方法,见 step 2 中内容。下面是启动audioRecord的打印信息。
03-14 15:02:34.005 2239 2239 D AudioRecord: Debug_dump_info: android::AudioRecord::AudioRecord(const android::String16 &) : 73 , create AudioRecord ...
03-14 15:02:34.005 2239 2239 D AudioRecord-JNI: Debug_dump_info: android_media_AudioRecord_setup,267 opPackageNameStr:com.dds.webrtc //> 调试 Demo 包名称
03-14 15:02:34.005 2239 2239 D AudioRecord-JNI: Debug_dump_info: android_media_AudioRecord_setup, 279, AudioRecord_setup for source=8 tags= flags=00000000
03-14 15:02:34.005 2239 2239 D AudioRecord: Debug_dump_info line:154 inputSource 8, sampleRate 44100, format 0x1, channelMask 0xc, frameCount 4096, notificationFrames 0, sessionId 0, transferType 0, flags 0, opPackageName com.dds.webrtc uid -1, pid -1
03-14 15:02:34.005 2239 2239 D AudioRecord: Debug_dump_info line:194 Building AudioRecord with attributes: source=8 flags=0x0 tags=[]
03-14 15:02:34.005 2239 2239 D AudioRecord: Debug_dump_info: status_t android::AudioRecord::set(audio_source_t, uint32_t, audio_format_t, audio_channel_mask_t, size_t, callback_t, void *, uint32_t, bool, audio_session_t, android::AudioRecord::transfer_type, audio_input_flags_t, int, pid_t, const audio_attributes_t *),237 mSessionId 17
03-14 11:06:59.373 1627 1627 D AudioRecord: Debug_dump_info: status_t android::AudioRecord::openRecord_l(const Modulo<uint32_t> &, const android::String16 &) , 718
03-14 11:06:59.373 1627 1627 D AudioRecord: Debug_dump_info: status_t android::AudioRecord::set(audio_source_t, uint32_t, audio_format_t, audio_channel_mask_t, size_t, callback_t, void *, uint32_t, bool, audio_session_t, android::AudioRecord::transfer_type, audio_input_flags_t, int, pid_t, const audio_attributes_t *),263 set() runStatus:0
03-14 11:06:59.373 1627 1627 D AudioRecord: Debug_dump_info: status_t android::AudioRecord::set(audio_source_t, uint32_t, audio_format_t, audio_channel_mask_t, size_t, callback_t, void *, uint32_t, bool, audio_session_t, android::AudioRecord::transfer_type, audio_input_flags_t, int, pid_t, const audio_attributes_t *),287 set() run over
03-14 11:06:59.373 1627 1627 D AudioRecord-JNI: Debug_dump_info: android_media_AudioRecord_setup,359 //> _setup接口 JNI 调用
AudioRecord::start() 源码如下
@ frameworks/av/media/libmedia/AudioRecord.cpp
// Client-side start of capture: marks the record active, forwards the sync
// event and trigger session to the server-side record track, and then either
// resumes the callback thread or boosts this thread to audio priority.
// Returns NO_ERROR when already active or on success; a server error status
// otherwise.
status_t AudioRecord::start(AudioSystem::sync_event_t event, audio_session_t triggerSession)
{
ALOGD("Debug_dump_info: %d sync event %d trigger session %d", __LINE__, event, triggerSession);
AutoMutex lock(mLock);
// Starting an already-active record is a no-op.
if (mActive) {
return NO_ERROR;
}
mMarkerReached = false;
mActive = true;
status_t status = NO_ERROR;
// NOTE(review): `flags` is not declared in this excerpt — presumably the
// control-block flags snapshot from the full AOSP source; confirm upstream.
if (!(flags & CBLK_INVALID)) {
status = mAudioRecord->start(event, triggerSession); //> send the event and session id to the server side
if (status == DEAD_OBJECT) {
// Server-side object died: mark the control block invalid.
flags |= CBLK_INVALID;
}
}
if (status != NO_ERROR) {
// Start failed: roll back the active flag.
mActive = false;
ALOGE("start() status %d", status);
} else {
sp<AudioRecordThread> t = mAudioRecordThread;
if (t != 0) {
// Callback transfer mode: wake the client callback thread.
t->resume();
} else {
// No callback thread: save current priority and raise this thread
// to audio priority instead.
mPreviousPriority = getpriority(PRIO_PROCESS, 0);
get_sched_policy(0, &mPreviousSchedulingGroup);
androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
}
}
return status;
}
系统调试日志,此 Session id 就是在init阶段建立的,服务端根据此 id 启动Input流,由此引发对 TinyAlsaHAL 层函数的调用。
3-14 17:53:49.592 11539 11655 D AudioRecord: Debug_dump_info: 295 sync event 0 trigger session 0 //> 发送 事件提示信息
03-14 17:53:49.596 229 229 D APM_AudioPolicyManager: Debug_dump_info: virtual status_t android::AudioPolicyManager::startInput(audio_io_handle_t, audio_session_t),1641 startInput() input 30
03-14 17:53:49.596 229 229 D APM_AudioPolicyManager: Debug_dump_info: audio_devices_t android::AudioPolicyManager::getDeviceAndMixForInputSource(audio_source_t, android::AudioMix **),5045
03-14 17:53:49.596 229 229 D APM_AudioPolicyManager: Debug_dump_info: audio_devices_t android::AudioPolicyManager::getDeviceAndMixForInputSource(audio_source_t, android::AudioMix **),5049
03-14 17:53:49.597 229 229 D APM_AudioPolicyManager: Debug_dump_info: virtual audio_devices_t android::AudioPolicyManager::getDeviceForInputSource(audio_source_t) , 5058 mInputRoutes.size:1
03-14 17:53:49.597 229 229 D APM_AudioPolicyManager: Debug_dump_info: virtual audio_devices_t android::AudioPolicyManager::getDeviceForInputSource(audio_source_t) , 5066 routeIndex:1
03-14 17:53:49.597 229 229 D APM_AudioPolicyManager: Debug_dump_info: status_t android::AudioPolicyManager::setInputDevice(audio_io_handle_t, audio_devices_t, bool, audio_patch_handle_t *) , 4924
03-14 17:53:49.598 229 289 D AudioFlinger::PatchPanel: Debug_dump_info: status_t android::AudioFlinger::PatchPanel::createAudioPatch(const struct audio_patch *, audio_patch_handle_t *) , 164
03-14 17:53:49.598 229 289 D AudioFlinger::PatchPanel: Debug_dump_info: status_t android::AudioFlinger::PatchPanel::createAudioPatch(const struct audio_patch *, audio_patch_handle_t *) , 245
03-14 17:53:49.599 229 11558 D AudioParameter: Debug_dump_info: android::AudioParameter::AudioParameter(const android::String8 &) : 64 , create AudioParameter ...
03-14 17:53:49.599 229 11558 D AudioHardwareTiny: Debug_dump_info: in_set_parameters: kvpairs = a2dp_sink_address=0;input_source=8;routing=-2147483392
03-14 17:53:49.599 229 11558 D AudioHardwareTiny: Debug_dump_info: in_set_parameters : 1994 source: 8
03-14 17:53:49.599 229 11558 D AudioHardwareTiny: Debug_dump_info: in_set_parameters : 2000 in->device:256
03-14 17:53:49.599 229 11558 D AudioHardwareTiny: Debug_dump_info: in_set_parameters , 2012 Device:256
03-14 17:53:49.599 229 11558 D AudioHardwareTiny: Debug_dump_info: in_set_parameters , 2018
03-14 17:53:49.599 229 11558 D AudioHardwareTiny: Debug_dump_info: in_set_parameters: exit: status(0)
03-14 17:53:49.600 229 289 D AudioFlinger::PatchPanel: Debug_dump_info: status_t android::AudioFlinger::PatchPanel::createAudioPatch(const struct audio_patch *, audio_patch_handle_t *) , 343
03-14 17:53:49.600 229 229 D APM_AudioPolicyManager: Debug_dump_info: setInputDevice() createAudioPatch returned 0 patchHandle 44
03-14 17:53:49.600 229 229 D APM_AudioPolicyManager: Debug_dump_info: found 1 profiles, 0 outputs
03-14 17:53:49.600 229 229 D APM_AudioPolicyManager: Debug_dump_info: opening output for device 00008000 with params 0 profile 0xb0f13300
03-14 17:53:49.601 229 229 D AudioHardwareTiny: Debug_dump_info: adev_open_output_stream devices = 0x8000, flags = 0, samplerate = 48000
03-14 17:53:49.601 229 229 D AudioHardwareTiny: Debug_dump_info: adev_open_output_stream : 2397
03-14 17:53:49.601 229 229 D AudioHardwareTiny: Debug_dump_info: out->config.rate = 44100, out->config.channels = 2 out->config.format = 0,out->config.flag = 0
此源码在RK3288平台上实现、内容如下:
@ hardware/rockchip/audio/tinyalsa_hal/audio_hw.c
static int start_input_stream(struct stream_in *in)
{
struct audio_device *adev = in->dev;
int ret = 0;
//ALOGV("Debug_dump_info: %s : %d , in->device: 0x%04x \n ", __func__, __LINE__, in->device);
ALOGD("Debug_dump_info: %s : %d enter process", __func__, __LINE__ );
in_dump(in, 0);
route_pcm_open(getRouteFromDevice(in->device | AUDIO_DEVICE_BIT_IN));
if(in->input_source == 0x08){ //> Remote_submix type
ALOGD("Debug_dump_info: %s : %d ,PCM_CARD:%d PCM_DEVICE:%d PCM_IN:%d ", __func__, __LINE__, PCM_CARD, PCM_DEVICE_SCO, PCM_IN);
in->pcm = pcm_open(PCM_CARD, PCM_DEVICE_SCO, PCM_IN, in->config);
}else{
ALOGD("Debug_dump_info: %s : %d ,PCM_CARD:%d PCM_DEVICE:%d PCM_IN:%d ", __func__, __LINE__, PCM_CARD, PCM_DEVICE, PCM_IN);
in->pcm = pcm_open(PCM_CARD, PCM_DEVICE, PCM_IN, in->config);
}
if (in->pcm && !pcm_is_ready(in->pcm)) {
ALOGE("Debug_dump_info: pcm_open() failed: %s", pcm_get_error(in->pcm));
pcm_close(in->pcm);
return -ENOMEM;
}
/* if no supported sample rate is available, use the resampler */
if (in->resampler)
in->resampler->reset(in->resampler);
in->frames_in = 0;
adev->input_source = in->input_source;
adev->in_device = in->device;
adev->in_channel_mask = in->channel_mask;
ALOGD("Debug_dump_info: %s : %d , sourceType: %4x \n ", __func__, __LINE__, in->input_source);
if (in->device & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET)
start_bt_sco(adev);
/* initialize volume ramp */
in->ramp_frames = (CAPTURE_START_RAMP_MS * in->requested_rate) / 1000;
in->ramp_step = (uint16_t)(USHRT_MAX / in->ramp_frames);
in->ramp_vol = 0;;
ALOGD("Debug_dump_info: %s : %d exit process", __func__, __LINE__ );
return 0;
}
笔者在源码中修改对 Remote_submix 输入源类型支持,对应的声卡和PCM_DEVICE如源码中描述,读者如不是很了解对应关系,
请参考《ubuntu 20 使用命令行 snd-aloop 实现内录音、录制音乐播放器的音频》 博文内容,snd-aloop声卡驱动配置文件中,
已经指定此 PCM_DEVICE 为 1 时,通过 snd-aloop驱动获取的音频数据,就是系统播放音频内容。
调试日志内容如下:
03-14 15:03:29.299 230 1409 D APM_AudioPolicyManager: Debug_dump_info: setInputDevice() createAudioPatch returned 0 patchHandle 44
03-14 15:03:29.300 230 1409 D APM_AudioPolicyManager: Debug_dump_info: found 1 profiles, 0 outputs
03-14 15:03:29.300 230 1409 D APM_AudioPolicyManager: Debug_dump_info: opening output for device 00008000 with params 0 profile 0xac813300
03-14 15:03:29.300 230 1409 D AudioHardwareTiny: Debug_dump_info: adev_open_output_stream devices = 0x8000, flags = 0, samplerate = 48000
03-14 15:03:29.300 230 1409 D AudioHardwareTiny: Debug_dump_info: adev_open_output_stream : 2397
03-14 15:03:29.300 230 1409 D AudioHardwareTiny: Debug_dump_info: out->config.rate = 44100, out->config.channels = 2 out->config.format = 0,out->config.flag = 0
03-14 15:03:29.302 230 1409 D APM_AudioPolicyManager: Debug_dump_info: AudioPolicyManager::startInput() input source = 8
03-14 15:03:29.304 230 2278 D AudioHardwareTiny: Debug_dump_info: start_input_stream : 826 enter process
03-14 15:03:29.305 230 2278 D AudioHardwareTiny: Debug_dump_info: getInputRouteFromDevice :361 device:80000100
03-14 15:03:29.305 230 2278 D alsa_route: Debug_dump_info: enter route_pcm_open() route: 25
03-14 15:03:29.305 230 2278 D alsa_route: Debug_dump_info:route 25 error for codec or hdmi!
03-14 15:03:29.305 230 2278 D alsa_route: Debug_dump_info: route_pcm_open exit
03-14 15:03:29.305 230 2278 D AudioHardwareTiny: Debug_dump_info: start_input_stream : 876 ,PCM_CARD:0 PCM_DEVICE:1 PCM_IN:268435456 /> 打开的 SND 和 PCM_DEVICE
03-14 15:03:29.305 230 2278 D AudioHardwareTiny: Debug_dump_info: start_input_stream : 898 , sourceType: 8
03-14 15:03:29.305 230 2278 D AudioHardwareTiny: Debug_dump_info: start_input_stream : 906 exit process
声卡设备打开完成,接下来就是读取 AudioRecord 的音频数据。
读取声卡音频数据源码如下;
@ hardware/rockchip/audio/tinyalsa_hal/audio_hw.c
/*
 * HAL capture read: leaves standby on first read (opening the PCM device via
 * start_input_stream), pulls `bytes` worth of frames from the PCM device,
 * then optionally runs the 3A voice pipeline and the Speex denoiser over the
 * buffer.  Always returns `bytes`; a failed read is signalled only by
 * sleeping for the equivalent capture duration (see `exit:` below).
 */
static ssize_t in_read(struct audio_stream_in *stream, void* buffer,
size_t bytes)
{
int ret = 0;
struct stream_in *in = (struct stream_in *)stream;
struct audio_device *adev = in->dev;
size_t frames_rq = bytes / audio_stream_in_frame_size(stream);
/*
* acquiring hw device mutex systematically is useful if a low
* priority thread is waiting on the input stream mutex - e.g.
* executing in_set_parameters() while holding the hw device
* mutex
*/
pthread_mutex_lock(&in->lock);
if (in->standby) {
/* first read after open/standby: bring up the PCM device.
 * adev->lock is taken inside in->lock — the ordering described above. */
pthread_mutex_lock(&adev->lock);
ret = start_input_stream(in);
pthread_mutex_unlock(&adev->lock);
if (ret < 0)
goto exit;
in->standby = false;
#ifdef AUDIO_3A
/* start the 3A (AEC/AGC/ANS) voice pipeline alongside capture */
if (adev->voice_api != NULL) {
adev->voice_api->start();
}
#endif
}
/*if (in->num_preprocessors != 0)
ret = process_frames(in, buffer, frames_rq);
else */
ALOGD("Debug_dump_info: %s: line:%d ", __FUNCTION__, __LINE__);
/* read_frames() resamples if needed; >0 means frames were produced */
ret = read_frames(in, buffer, frames_rq);
if (ret > 0)
ret = 0;
#ifdef AUDIO_3A
/* push the captured buffer through the 3A engine and read the processed
 * result back into the same buffer; zero-fill on failure */
do {
if (adev->voice_api != NULL) {
int ret = 0;
ret = adev->voice_api->quueCaputureBuffer(buffer, bytes);
if (ret < 0) break;
ret = adev->voice_api->getCapureBuffer(buffer, bytes);
if (ret < 0) memset(buffer, 0x00, bytes);
}
} while (0);
ALOGD("Debug_dump_info: %s: line:%d ", __FUNCTION__, __LINE__);
#endif
//if (in->ramp_frames > 0)
// in_apply_ramp(in, buffer, frames_rq);
/*
* Instead of writing zeroes here, we could trust the hardware
* to always provide zeroes when muted.
*/
//if (ret == 0 && adev->mic_mute)
// memset(buffer, 0, bytes);
#ifdef SPEEX_DENOISE_ENABLE
/* Speex denoise: downmix each mSpeexFrameSize chunk to mono, run the
 * preprocessor, then fan the result back out to every channel. */
if(!adev->mic_mute && ret== 0) {
int index = 0;
int startPos = 0;
spx_int16_t* data = (spx_int16_t*) buffer;
/* NOTE(review): in->channel_mask is an input mask but is passed to
 * audio_channel_count_from_out_mask() — confirm against the full HAL. */
int channel_count = audio_channel_count_from_out_mask(in->channel_mask);
int curFrameSize = bytes/(channel_count*sizeof(int16_t));
long ch;
if(curFrameSize != 2*in->mSpeexFrameSize)
ALOGD("the current request have some error mSpeexFrameSize %d bytes %d ",in->mSpeexFrameSize,bytes);
ALOGD("Debug_dump_info: %s line %d , startPos: %d ", __FUNCTION__, __LINE__, startPos);
while(curFrameSize >= startPos+in->mSpeexFrameSize) {
/* downmix stereo to mono (average of the two channels) */
for(index = startPos; index< startPos +in->mSpeexFrameSize ; index++ )
in->mSpeexPcmIn[index-startPos] = data[index*channel_count]/2 + data[index*channel_count+1]/2;
speex_preprocess_run(in->mSpeexState,in->mSpeexPcmIn);
#ifndef TARGET_RK2928
/* copy the denoised mono signal back into every channel */
for(ch = 0 ; ch < channel_count; ch++)
for(index = startPos; index< startPos + in->mSpeexFrameSize ; index++ ) {
data[index*channel_count+ch] = in->mSpeexPcmIn[index-startPos];
}
#else
/* RK2928: apply a 1.5x gain with saturation before fan-out */
for(index = startPos; index< startPos + in->mSpeexFrameSize ; index++ ) {
int tmp = (int)in->mSpeexPcmIn[index-startPos]+ in->mSpeexPcmIn[index-startPos]/2;
data[index*channel_count+0] = tmp > 32767 ? 32767 : (tmp < -32768 ? -32768 : tmp);
}
for(int ch = 1 ; ch < channel_count; ch++)
for(index = startPos; index< startPos + in->mSpeexFrameSize ; index++ ) {
data[index*channel_count+ch] = data[index*channel_count+0];
}
#endif
startPos += in->mSpeexFrameSize;
}
}
#endif
exit:
/* on error, sleep for the duration this many bytes would have taken to
 * capture, so the caller's timing stays roughly real-time */
if (ret < 0)
usleep(bytes * 1000000 / audio_stream_in_frame_size(stream) /
in_get_sample_rate(&stream->common));
pthread_mutex_unlock(&in->lock);
/* NOTE(review): `bytes` is returned even when ret < 0 — the error is
 * swallowed; callers cannot distinguish a failed read. */
return bytes;
}
此源码不做解释,看下面调试打印日志内容吧。
03-14 15:03:29.305 230 2278 D AudioHardwareTiny: Debug_dump_info: in_read: line:2128
03-14 15:03:29.308 2239 2277 D AudioRecord: Debug_dump_info: virtual bool android::AudioRecord::AudioRecordThread::threadLoop() , 1268
03-14 15:03:29.308 2239 2277 D AudioRecord: Debug_dump_info: nsecs_t android::AudioRecord::processAudioBuffer() , 926
03-14 15:03:29.308 2239 2277 D AudioRecord: Debug_dump_info: virtual bool android::AudioRecord::AudioRecordThread::threadLoop() , 1268
03-14 15:03:29.314 2239 2951 D AudioRecord: Debug_dump_info: ssize_t android::AudioRecord::read(void *, size_t, bool) , 881
03-14 15:03:29.328 230 2278 D AudioHardwareTiny: Debug_dump_info: frames_wr:0,buf.frame_count:1024,frame_size:4====
03-14 15:03:29.328 230 2278 D AudioHardwareTiny: Debug_dump_info: in_read line 2167 , startPos: 0
03-14 15:03:29.329 230 2278 D AudioHardwareTiny: Debug_dump_info: in_read: line:2128
03-14 15:03:29.329 2239 2951 D AudioRecord: Debug_dump_info: status_t android::AudioRecord::obtainBuffer(android::AudioRecord::Buffer *, const struct timespec *, struct timespec *, size_t *) , 838 frameCount:512
03-14 15:03:29.329 2239 2951 D Debug_dump_info:: buffer = [B@7be48d1, num = 2048
03-14 15:03:29.329 2239 2951 D AudioRecord: Debug_dump_info: ssize_t android::AudioRecord::read(void *, size_t, bool) , 881
03-14 15:03:29.329 2239 2951 D AudioRecord: Debug_dump_info: status_t android::AudioRecord::obtainBuffer(android::AudioRecord::Buffer *, const struct timespec *, struct timespec *, size_t *) , 838 frameCount:512
03-14 15:03:29.329 2239 2951 D Debug_dump_info:: buffer = [B@7be48d1, num = 2048
03-14 15:03:29.330 2239 2951 D AudioRecord: Debug_dump_info: ssize_t android::AudioRecord::read(void *, size_t, bool) , 881
03-14 15:03:29.351 230 2278 D AudioHardwareTiny: Debug_dump_info: frames_wr:0,buf.frame_count:1024,frame_size:4====
03-14 15:03:29.351 230 2278 D AudioHardwareTiny: Debug_dump_info: in_read line 2167 , startPos: 0
03-14 15:03:29.352 230 2278 D AudioHardwareTiny: Debug_dump_info: in_read: line:2128
03-14 15:03:29.352 2239 2951 D AudioRecord: Debug_dump_info: status_t android::AudioRecord::obtainBuffer(android::AudioRecord::Buffer *, const struct timespec *, struct timespec *, size_t *) , 838 frameCount:512
03-14 15:03:29.352 2239 2951 D Debug_dump_info:: buffer = [B@7be48d1, num = 2048
03-14 15:03:29.352 2239 2951 D AudioRecord: Debug_dump_info: ssize_t android::AudioRecord::read(void *, size_t, bool) , 881
03-14 15:03:29.352 2239 2951 D AudioRecord: Debug_dump_info: status_t android::AudioRecord::obtainBuffer(android::AudioRecord::Buffer *, const struct timespec *, struct timespec *, size_t *) , 838 frameCount:512
03-14 15:03:29.352 2239 2951 D Debug_dump_info:: buffer = [B@7be48d1, num = 2048
用户点击按钮停止录音,调用 JNI 接口是 android_media_AudioRecord_stop() 对应着 AudioRecord::stop() 方法,源码如下:
void AudioRecord::stop()
{
AutoMutex lock(mLock);
if (!mActive) {
return;
}
mActive = false;
mProxy->interrupt();
mAudioRecord->stop();
// Note: legacy handling - stop does not clear record marker and
// periodic update position; we update those on start().
ALOGD("Debug_dump_info: %s , %d ", __func__, __LINE__);
sp<AudioRecordThread> t = mAudioRecordThread;
if (t != 0) {
t->pause();
} else {
setpriority(PRIO_PROCESS, 0, mPreviousPriority);
set_sched_policy(0, mPreviousSchedulingGroup);
}
}
此部分牵扯出 AudioProxy 相关内容,我不在此展开,我们知道是通过 JNI 接口来停止录音的。姑且这样吧。调试日志如下:
03-14 15:03:43.938 2239 2951 D AudioRecord: Debug_dump_info: ssize_t android::AudioRecord::read(void *, size_t, bool) , 881
03-14 15:03:43.942 2239 2239 D Debug_dump_info:: onClick StopRecord //> 点击停止录音
03-14 15:03:43.958 230 2278 D AudioHardwareTiny: Debug_dump_info: frames_wr:0,buf.frame_count:1024,frame_size:4====
03-14 15:03:43.958 230 2278 D AudioHardwareTiny: Debug_dump_info: in_read line 2167 , startPos: 0
03-14 15:03:43.959 230 2278 D AudioHardwareTiny: Debug_dump_info: in_read: line:2128
03-14 15:03:43.959 2239 2951 D AudioRecord: Debug_dump_info: status_t android::AudioRecord::obtainBuffer(android::AudioRecord::Buffer *, const struct timespec *, struct timespec *, size_t *) , 838 frameCount:512
03-14 15:03:43.960 2239 2951 D Debug_dump_info:: buffer = [B@7be48d1, num = 2048
03-14 15:03:43.960 2239 2951 D Debug_dump_info:: exit loop //> 退出录音
03-14 15:03:43.981 230 2278 D AudioHardwareTiny: Debug_dump_info: frames_wr:0,buf.frame_count:1024,frame_size:4====
03-14 15:03:43.981 230 2278 D AudioHardwareTiny: Debug_dump_info: in_read line 2167 , startPos: 0
03-14 15:03:43.986 230 2278 D AudioHardwareTiny: Debug_dump_info: in_set_parameters: kvpairs = routing=0
03-14 15:03:43.986 230 2278 D AudioHardwareTiny: Debug_dump_info: in_set_parameters : 1994 source: -2
03-14 15:03:43.986 230 2278 D AudioHardwareTiny: Debug_dump_info: in_set_parameters : 2000 in->device:256
03-14 15:03:43.986 230 2278 D AudioHardwareTiny: Debug_dump_info: in_set_parameters , 2012 Device:256
03-14 15:03:43.986 230 2278 D AudioHardwareTiny: Debug_dump_info: in_set_parameters , 2018
03-14 15:03:43.986 230 2278 D AudioHardwareTiny: Debug_dump_info: in_set_parameters: exit: status(0)
03-14 15:03:43.986 230 230 D APM_AudioPolicyManager: Debug_dump_info: resetInputDevice() releaseAudioPatch returned 0
03-14 15:03:43.987 2239 2951 D AudioRecord: Debug_dump_info: void android::AudioRecord::stop() , 364 //> 停止录音
03-14 15:03:43.988 2239 2951 D AudioRecord: Debug_dump_info: virtual android::AudioRecord::AudioRecordThread::~AudioRecordThread() , 1263
03-14 15:03:43.989 230 300 D APM_AudioPolicyManager: Debug_dump_info: void android::AudioPolicyManager::closeInput(audio_io_handle_t),4245 closeInput(30) //> 关闭输入流
03-14 15:03:44.091 2239 2951 D Debug_dump_info:: out wav format file:/voice-sub.wav
简单总结下:
1>. 用户 new AudioRecord() 时调用的是 native_setup() JNI 接口,初始化 input 设备;
2>. 开始录音时调用的是 native_start() JNI 接口,打开声卡设备并读取音频流数据;
3>. 停止录音时调用的是 native_stop() JNI 接口,关闭输入设备。
至此连续三篇内容,基本把通过 snd-aloop 虚拟声卡、实现 Android系统声音录制全过程给如实的说清楚了,我把录制的音频文件放到网盘上,
如有需要请自行下载。
链接:https://pan.baidu.com/s/1JpQbdJXGM2wB01I_6MsBUA
提取码:8bu6
如果有读者想验证这个内容的话,其中还涉及到 android 系统相关属性配置、编译文件 Android.mk 内容修改,后期我有时间会继续分享
《Android 8.1 系统裁剪、定制化实践 snd-aloop 内录音》内容。
附录 pcmToWav.java 源码
/**
 * Wraps a raw 16-bit PCM capture file in a 44-byte canonical WAV (RIFF)
 * header so standard players (e.g. VLC) can play it.
 */
public class PcmToWavUtil {
    // Copy-buffer size for streaming the PCM payload.
    private int mBufferSize;
    // Sample rate in Hz (e.g. 44100).
    private int mSampleRate;
    // Channel configuration as passed by the caller; compared against
    // AudioFormat.CHANNEL_IN_MONO to derive the channel count.
    private int mChannel;

    /**
     * @param sampleRate sample rate in Hz
     * @param channel    channel configuration (AudioFormat.CHANNEL_IN_* value)
     * @param encoding   audio data format (AudioFormat.ENCODING_PCM_*)
     */
    PcmToWavUtil(int sampleRate, int channel, int encoding) {
        this.mSampleRate = sampleRate;
        this.mChannel = channel;
        this.mBufferSize = AudioRecord.getMinBufferSize(mSampleRate, mChannel, encoding);
    }

    /**
     * Converts a raw PCM file to a WAV file.
     *
     * @param inFilename  source (raw PCM) file path
     * @param outFilename destination (WAV) file path
     */
    public void pcmToWav(String inFilename, String outFilename) {
        FileInputStream in = null;
        FileOutputStream out = null;
        long totalAudioLen;
        long totalDataLen;
        long longSampleRate = mSampleRate;
        int channels = mChannel == AudioFormat.CHANNEL_IN_MONO ? 1 : 2;
        long byteRate = 16 * mSampleRate * channels / 8; // bytes/sec for 16-bit PCM
        byte[] data = new byte[mBufferSize];
        try {
            in = new FileInputStream(inFilename);
            out = new FileOutputStream(outFilename);
            totalAudioLen = in.getChannel().size();
            totalDataLen = totalAudioLen + 36; // RIFF chunk size = payload + header after offset 8
            writeWaveFileHeader(out, totalAudioLen, totalDataLen,
                    longSampleRate, channels, byteRate);
            int num;
            // Write only the bytes actually read: the original wrote the whole
            // buffer each pass, appending stale bytes on the final short read.
            while ((num = in.read(data)) != -1) {
                out.write(data, 0, num);
            }
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            // Close both streams even if the copy failed part-way through.
            try { if (in != null) in.close(); } catch (IOException ignored) { }
            try { if (out != null) out.close(); } catch (IOException ignored) { }
        }
    }

    /**
     * Writes the 44-byte canonical WAV header (RIFF + 'fmt ' + 'data' chunks)
     * for 16-bit little-endian PCM.
     */
    private void writeWaveFileHeader(FileOutputStream out, long totalAudioLen,
            long totalDataLen, long longSampleRate, int channels, long byteRate)
            throws IOException {
        byte[] header = new byte[44];
        // RIFF/WAVE header
        header[0] = 'R';
        header[1] = 'I';
        header[2] = 'F';
        header[3] = 'F';
        header[4] = (byte) (totalDataLen & 0xff);
        header[5] = (byte) ((totalDataLen >> 8) & 0xff);
        header[6] = (byte) ((totalDataLen >> 16) & 0xff);
        header[7] = (byte) ((totalDataLen >> 24) & 0xff);
        // WAVE
        header[8] = 'W';
        header[9] = 'A';
        header[10] = 'V';
        header[11] = 'E';
        // 'fmt ' chunk
        header[12] = 'f';
        header[13] = 'm';
        header[14] = 't';
        header[15] = ' ';
        // 4 bytes: size of 'fmt ' chunk (16 for PCM)
        header[16] = 16;
        header[17] = 0;
        header[18] = 0;
        header[19] = 0;
        // audio format = 1 (uncompressed PCM)
        header[20] = 1;
        header[21] = 0;
        header[22] = (byte) channels;
        header[23] = 0;
        header[24] = (byte) (longSampleRate & 0xff);
        header[25] = (byte) ((longSampleRate >> 8) & 0xff);
        header[26] = (byte) ((longSampleRate >> 16) & 0xff);
        header[27] = (byte) ((longSampleRate >> 24) & 0xff);
        header[28] = (byte) (byteRate & 0xff);
        header[29] = (byte) ((byteRate >> 8) & 0xff);
        header[30] = (byte) ((byteRate >> 16) & 0xff);
        header[31] = (byte) ((byteRate >> 24) & 0xff);
        // block align = channels * bitsPerSample / 8 (was hard-coded to stereo)
        header[32] = (byte) (channels * 16 / 8);
        header[33] = 0;
        // bits per sample
        header[34] = 16;
        header[35] = 0;
        // data chunk
        header[36] = 'd';
        header[37] = 'a';
        header[38] = 't';
        header[39] = 'a';
        header[40] = (byte) (totalAudioLen & 0xff);
        header[41] = (byte) ((totalAudioLen >> 8) & 0xff);
        header[42] = (byte) ((totalAudioLen >> 16) & 0xff);
        header[43] = (byte) ((totalAudioLen >> 24) & 0xff);
        out.write(header, 0, 44);
    }
}