Android Audio Development: Using the OpenSL ES API

This article documents audio development on the Android side using the NDK.

Android Studio 2.2 added CMake support for NDK development, which makes things considerably easier; if you haven't upgraded yet, it is worth doing so.

Related guide: https://developer.android.com/ndk/guides/audio/index.html

If you are not yet familiar with adding C++ code to an Android project, see: https://developer.android.com/studio/projects/add-native-code.html#download-ndk

OpenSL ES specification: https://www.khronos.org/registry/sles/specs/OpenSL_ES_Specification_1.0.1.pdf

A useful reference article: https://audioprograming.wordpress.com/2012/03/03/android-audio-streaming-with-opensl-es-and-the-ndk/

Google provides two samples, audio-echo and native-audio; they are a good place to start practicing.

My test code, written with reference to that article: https://github.com/CL-window/audio_openSL

opensl_io.h exposes four main functions: they open/close the Android audio device, write audio data, and read audio data.
Before reading the C code, a few pointer basics are worth a quick review:
/** Things to know:
 * ->  member access through a pointer to a struct.
 *     A struct instance uses '.' to access its members; a pointer to a struct uses '->'.
 * *   in a declaration introduces a pointer; elsewhere it dereferences a pointer.
 * &   the address-of operator, which yields the memory address of a variable.
 */
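
A tiny, hypothetical snippet (not part of opensl_io) that illustrates these operators:

#include <stdio.h>

typedef struct { int sr; } Stream;   // hypothetical struct, for illustration only

int main(void) {
    Stream  s;
    Stream *p = &s;        // &  : address-of operator, p now points to s
    s.sr  = 44100;         // .  : member access on a struct instance
    p->sr = 48000;         // -> : member access through a pointer to a struct
    printf("%d %d\n", s.sr, (*p).sr);   // *  : dereference; (*p).sr is the same as p->sr
    return 0;
}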

/*
Open the audio device with a given sampling rate (sr), input and output channels and IO buffer size
in frames. Returns a handle to the OpenSL stream
*/
OPENSL_STREAM* android_OpenAudioDevice(int sr, int inchannels, int outchannels, int bufferframes);
/*
Close the audio device
*/
void android_CloseAudioDevice(OPENSL_STREAM *p);
/*
Read a buffer from the OpenSL stream *p, of size samples. Returns the number of samples read.
*/
int android_AudioIn(OPENSL_STREAM *p, float *buffer, int size);
/*
Write a buffer to the OpenSL stream *p, of size samples. Returns the number of samples written.
*/
int android_AudioOut(OPENSL_STREAM *p, float *buffer, int size);

Note that recording and playback must run on a worker thread; I use a HandlerThread.
Only opensl_io.c (together with its header opensl_io.h) is used here.
The original author provides three variants: opensl_io.c, opensl_io2.c and opensl_io3.c.
opensl_io.c uses thread locks to coordinate recording and playback.
opensl_io2.c uses the callback style Google recommends, via SLAndroidSimpleBufferQueueItf.
opensl_io3.c also uses callbacks, but its playback callback carries the audio data.

The audio_recode_play.cpp file:

//
// Created by slack on 2016/12/27.
//


#include <android/log.h>
#include <stdio.h>
#include "opensl_io.h"
#include "jni.h"

#define LOG(...) __android_log_print(ANDROID_LOG_DEBUG,"AudioDemo-JNI",__VA_ARGS__)

#define SAMPLERATE 44100
#define CHANNELS 1
#define PERIOD_TIME 20 //ms
#define FRAME_SIZE SAMPLERATE*PERIOD_TIME/1000
#define BUFFER_SIZE FRAME_SIZE*CHANNELS
#define TEST_CAPTURE_FILE_PATH "/sdcard/audio.pcm"

static volatile int g_loop_exit = 0;

#ifdef __cplusplus
extern "C" {
#endif

JNIEXPORT jboolean JNICALL
Java_com_cl_slack_audio_1foreign_AudioJNI_startRecodeAudio(JNIEnv *env, jclass type) {

    FILE * fp = fopen(TEST_CAPTURE_FILE_PATH, "wb");
    if( fp == NULL ) {
        LOG("cannot open file (%s)\n", TEST_CAPTURE_FILE_PATH);
        return JNI_FALSE;
    }

    OPENSL_STREAM* stream = android_OpenAudioDevice(SAMPLERATE, CHANNELS, CHANNELS, FRAME_SIZE);
    if (stream == NULL) {
        fclose(fp);
        LOG("failed to open audio device ! \n");
        return JNI_FALSE;
    }

    int samples;
    float buffer[BUFFER_SIZE];
    g_loop_exit = 0;
    while (!g_loop_exit) {
        samples = android_AudioIn(stream, buffer, BUFFER_SIZE);
        if (samples < 0) {
            LOG("android_AudioIn failed !\n");
            break;
        }
        // android_AudioIn fills a float buffer, so write sizeof(float) bytes per sample
        if (fwrite((unsigned char *)buffer, samples * sizeof(float), 1, fp) != 1) {
            LOG("failed to save captured data !\n ");
            break;
        }
        LOG("capture %d samples !\n", samples);
    }

    android_CloseAudioDevice(stream);
    fclose(fp);

    LOG("nativeStartCapture completed !");

    return JNI_TRUE;

}

JNIEXPORT jboolean JNICALL
Java_com_cl_slack_audio_1foreign_AudioJNI_stopRecodeAudio(JNIEnv *env, jclass type) {

    g_loop_exit = 1;
    return JNI_TRUE;

}

JNIEXPORT jboolean JNICALL
Java_com_cl_slack_audio_1foreign_AudioJNI_startPlayAudio(JNIEnv *env, jclass type) {

    FILE * fp = fopen(TEST_CAPTURE_FILE_PATH, "rb");
    if( fp == NULL ) {
        LOG("cannot open file (%s) !\n",TEST_CAPTURE_FILE_PATH);
        return JNI_FALSE;
    }

    OPENSL_STREAM* stream = android_OpenAudioDevice(SAMPLERATE, CHANNELS, CHANNELS, FRAME_SIZE);
    if (stream == NULL) {
        fclose(fp);
        LOG("failed to open audio device ! \n");
        return JNI_FALSE;
    }

    int samples;
    float buffer[BUFFER_SIZE];
    g_loop_exit = 0;
    while (!g_loop_exit && !feof(fp)) {
        // the file contains float samples written by startRecodeAudio, so read sizeof(float) bytes per sample
        if (fread((unsigned char *)buffer, BUFFER_SIZE * sizeof(float), 1, fp) != 1) {
            LOG("failed to read data \n ");
            break;
        }
        samples = android_AudioOut(stream, buffer, BUFFER_SIZE);
        if (samples < 0) {
            LOG("android_AudioOut failed !\n");
            break;
        }
        LOG("playback %d samples !\n", samples);
    }

    android_CloseAudioDevice(stream);
    fclose(fp);

    LOG("nativeStartPlayback completed !");

    return JNI_TRUE;

}

JNIEXPORT jboolean JNICALL
Java_com_cl_slack_audio_1foreign_AudioJNI_stopPlayAudio(JNIEnv *env, jclass type) {

    g_loop_exit = 1;
    return JNI_TRUE;

}

#ifdef __cplusplus
}
#endif

Creating the audio engine
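
The snippets in the next few sections follow the structure of Google's native-audio sample; they assume the following headers and file-scope handles (names taken from that sample), plus a local SLresult result in each setup function:

#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_Android.h>

// engine object and interface
static SLObjectItf engineObject = NULL;
static SLEngineItf engineEngine;

// output mix object
static SLObjectItf outputMixObject = NULL;

// buffer queue player objects and interfaces
static SLObjectItf bqPlayerObject = NULL;
static SLPlayItf bqPlayerPlay;
static SLAndroidSimpleBufferQueueItf bqPlayerBufferQueue;
static SLmilliHertz bqPlayerSampleRate = 0;   // 0 means "use the format's default rate"

// recorder objects and interfaces
static SLObjectItf recorderObject = NULL;
static SLRecordItf recorderRecord;
static SLAndroidSimpleBufferQueueItf recorderBufferQueue;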

// create the engine object
    result = slCreateEngine(&engineObject, 0, NULL, 0, NULL, NULL);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;

    // realize (initialize) the engineObject created above
    result = (*engineObject)->Realize(engineObject, SL_BOOLEAN_FALSE);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;

    // get the engine interface, which is needed in order to create other objects
    // the SLEngineItf obtained here is used for all later object creation
    result = (*engineObject)->GetInterface(engineObject, SL_IID_ENGINE, &engineEngine);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;

    // create output mix, with environmental reverb specified as a non-required interface
    // use engineEngine to create the output mix object outputMixObject
    const SLInterfaceID ids[1] = {SL_IID_ENVIRONMENTALREVERB};
    const SLboolean req[1] = {SL_BOOLEAN_FALSE};
    result = (*engineEngine)->CreateOutputMix(engineEngine, &outputMixObject, 1, ids, req);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;

    // realize (initialize) the output mix
    result = (*outputMixObject)->Realize(outputMixObject, SL_BOOLEAN_FALSE);

Creating the audio recorder
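
The CreateAudioRecorder call below takes an audioSrc (the microphone) and an audioSnk (a buffer queue with a PCM format). A sketch of that configuration, following the native-audio sample (the 16 kHz mono 16-bit format is that sample's choice, not a requirement):

    // audio source: the device's default audio input (microphone)
    SLDataLocator_IODevice loc_dev = {SL_DATALOCATOR_IODEVICE, SL_IODEVICE_AUDIOINPUT,
            SL_DEFAULTDEVICEID_AUDIOINPUT, NULL};
    SLDataSource audioSrc = {&loc_dev, NULL};

    // audio sink: an Android simple buffer queue receiving 16-bit mono PCM at 16 kHz
    SLDataLocator_AndroidSimpleBufferQueue loc_bq = {SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 2};
    SLDataFormat_PCM format_pcm = {SL_DATAFORMAT_PCM, 1, SL_SAMPLINGRATE_16,
            SL_PCMSAMPLEFORMAT_FIXED_16, SL_PCMSAMPLEFORMAT_FIXED_16,
            SL_SPEAKER_FRONT_CENTER, SL_BYTEORDER_LITTLEENDIAN};
    SLDataSink audioSnk = {&loc_bq, &format_pcm};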

// create the audio recorder
    // (requires the RECORD_AUDIO permission)
    const SLInterfaceID id[1] = {SL_IID_ANDROIDSIMPLEBUFFERQUEUE};
    const SLboolean req[1] = {SL_BOOLEAN_TRUE};
    result = (*engineEngine)->CreateAudioRecorder(engineEngine, &recorderObject, &audioSrc,
            &audioSnk, 1, id, req);
    if (SL_RESULT_SUCCESS != result) {
        return JNI_FALSE;
    }

    // realize (initialize) the audio recorder
    result = (*recorderObject)->Realize(recorderObject, SL_BOOLEAN_FALSE);
    if (SL_RESULT_SUCCESS != result) {
        return JNI_FALSE;
    }

    // get the record interface (SLRecordItf)
    result = (*recorderObject)->GetInterface(recorderObject, SL_IID_RECORD, &recorderRecord);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;

    // get the buffer queue interface for audio input
    result = (*recorderObject)->GetInterface(recorderObject, SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
            &recorderBufferQueue);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;

    // register callback on the buffer queue
    result = (*recorderBufferQueue)->RegisterCallback(recorderBufferQueue, bqRecorderCallback,
            NULL);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;

Starting recording

// in case already recording, stop recording and clear buffer queue
    result = (*recorderRecord)->SetRecordState(recorderRecord, SL_RECORDSTATE_STOPPED);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;
    result = (*recorderBufferQueue)->Clear(recorderBufferQueue);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;

    // the buffer is not valid for playback yet
    recorderSize = 0;

    // enqueue an empty buffer to be filled by the recorder
    // (for streaming recording, we would enqueue at least 2 empty buffers to start things off)
    result = (*recorderBufferQueue)->Enqueue(recorderBufferQueue, recorderBuffer,
            RECORDER_FRAMES * sizeof(short));
    // the most likely other result is SL_RESULT_BUFFER_INSUFFICIENT,
    // which for this code example would indicate a programming error
    assert(SL_RESULT_SUCCESS == result);
    (void)result;

    // start recording
    result = (*recorderRecord)->SetRecordState(recorderRecord, SL_RECORDSTATE_RECORDING);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;
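
The bqRecorderCallback registered earlier fires once the enqueued buffer has been filled. A minimal sketch in the style of the native-audio sample (recorderBuffer, RECORDER_FRAMES and recorderSize are assumed to be declared as in that sample); a streaming recorder would enqueue the next buffer here instead of stopping:

// this callback handler is called every time a buffer finishes recording
void bqRecorderCallback(SLAndroidSimpleBufferQueueItf bq, void *context)
{
    assert(bq == recorderBufferQueue);
    assert(NULL == context);
    // this demo records into a single buffer, so stop once it is full
    SLresult result = (*recorderRecord)->SetRecordState(recorderRecord, SL_RECORDSTATE_STOPPED);
    if (SL_RESULT_SUCCESS == result) {
        recorderSize = RECORDER_FRAMES * sizeof(short);
    }
}
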
Creating the audio player

// configure the audio source: buffer queue locator and PCM format
    SLDataLocator_AndroidSimpleBufferQueue loc_bufq = {SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 2};
    SLDataFormat_PCM format_pcm = {SL_DATAFORMAT_PCM, 1, SL_SAMPLINGRATE_8,
        SL_PCMSAMPLEFORMAT_FIXED_16, SL_PCMSAMPLEFORMAT_FIXED_16,
        SL_SPEAKER_FRONT_CENTER, SL_BYTEORDER_LITTLEENDIAN};

    // data source
    SLDataSource audioSrc = {&loc_bufq, &format_pcm};

    // configure the audio sink (the output mix)
    SLDataLocator_OutputMix loc_outmix = {SL_DATALOCATOR_OUTPUTMIX, outputMixObject};
    SLDataSink audioSnk = {&loc_outmix, NULL};

    const SLInterfaceID ids[3] = {SL_IID_BUFFERQUEUE, SL_IID_VOLUME, SL_IID_EFFECTSEND,
                                    /*SL_IID_MUTESOLO,*/};
    const SLboolean req[3] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE,
                                   /*SL_BOOLEAN_TRUE,*/ };
    // create the audio player object
    result = (*engineEngine)->CreateAudioPlayer(engineEngine, &bqPlayerObject, &audioSrc, &audioSnk,
            bqPlayerSampleRate? 2 : 3, ids, req);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;

    // realize (initialize) the player
    result = (*bqPlayerObject)->Realize(bqPlayerObject, SL_BOOLEAN_FALSE);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;

    // get the play interface (SLPlayItf)
    result = (*bqPlayerObject)->GetInterface(bqPlayerObject, SL_IID_PLAY, &bqPlayerPlay);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;

    // get the buffer queue interface (SLAndroidSimpleBufferQueueItf) for audio output
    result = (*bqPlayerObject)->GetInterface(bqPlayerObject, SL_IID_BUFFERQUEUE,
            &bqPlayerBufferQueue);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;

    // register callback on the buffer queue
    result = (*bqPlayerBufferQueue)->RegisterCallback(bqPlayerBufferQueue, bqPlayerCallback, NULL);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;
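
Once the buffer queue interface is obtained, playback is started by setting the play state and enqueueing the first buffer; the registered bqPlayerCallback then keeps the queue fed. A hedged sketch (nextBuffer and nextSize are placeholder variables standing for "the next chunk of PCM data", not part of the original code):

// this callback handler is called every time a buffer finishes playing
void bqPlayerCallback(SLAndroidSimpleBufferQueueItf bq, void *context)
{
    assert(bq == bqPlayerBufferQueue);
    assert(NULL == context);
    // for streaming playback: fetch the next chunk of PCM data and enqueue it
    if (NULL != nextBuffer && 0 != nextSize) {
        SLresult result = (*bqPlayerBufferQueue)->Enqueue(bqPlayerBufferQueue, nextBuffer, nextSize);
        (void)result;
    }
}

And, inside the function that starts playback:

    // set the player to the playing state and enqueue the first buffer to kick things off
    result = (*bqPlayerPlay)->SetPlayState(bqPlayerPlay, SL_PLAYSTATE_PLAYING);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;
    result = (*bqPlayerBufferQueue)->Enqueue(bqPlayerBufferQueue, nextBuffer, nextSize);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;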


To sum up, the general pattern for using the OpenSL ES API is:
1. Create an object (via a Create* function).
2. Initialize it (via Realize).
3. Get the interfaces you need for its features (via GetInterface).
