类似文章太多,但是大多代码都有内存泄漏的问题,而且都缺少c层调用java层的例子,实际上有了参考博文后,还是有很多坑需要自己填。不过,看了很多博主和帖子后还是能够解决一些问题,但是有些问题根本找不到答案,所以我把音频解码、播放还有控制部分做成了比较详细的例子。
ffmpeg的编译请参考我之前的文章,有编好的库文件在我的下载资源里,github的demo里也有。
opensl库的引用也很简单,在CMakeLists.txt里加入OpenSLES就可以了,像这样
# Link the JNI library against its dependencies. OpenSLES and android are NDK
# system libraries, so naming them here is all that is required; ffmpeg is the
# prebuilt library imported elsewhere in this CMakeLists.
target_link_libraries( native-lib
android
ffmpeg
OpenSLES
${log-lib} )
opensl使用步骤大概分三步
// Step 1 of OpenSL ES setup: create the engine, the output mix and the
// (optional) environmental reverb. engineObject, engineEngine,
// outputMixObject, outputMixEnvironmentalReverb and reverbSettings are
// file-scope globals declared elsewhere in this file.
extern "C"
void createEngine() {
    SLresult result;
    // Create the engine object (no options, no required interfaces).
    result = slCreateEngine(&engineObject, 0, NULL, 0, NULL, NULL);
    assert(SL_RESULT_SUCCESS == result); // NOTE: assert() is compiled out in release builds
    (void) result;
    // Realize the engine (SL_BOOLEAN_FALSE = synchronous/blocking realize).
    result = (*engineObject)->Realize(engineObject, SL_BOOLEAN_FALSE);
    assert(SL_RESULT_SUCCESS == result);
    (void) result;
    // Get the engine interface, which is needed in order to create other objects.
    result = (*engineObject)->GetInterface(engineObject, SL_IID_ENGINE, &engineEngine);
    assert(SL_RESULT_SUCCESS == result);
    (void) result;
    // Create the output mix, with environmental reverb specified as a
    // non-required interface.
    const SLInterfaceID ids[1] = {SL_IID_ENVIRONMENTALREVERB};
    const SLboolean req[1] = {SL_BOOLEAN_FALSE};
    result = (*engineEngine)->CreateOutputMix(engineEngine, &outputMixObject, 1, ids, req);
    assert(SL_RESULT_SUCCESS == result);
    (void) result;
    // Realize the output mix.
    result = (*outputMixObject)->Realize(outputMixObject, SL_BOOLEAN_FALSE);
    assert(SL_RESULT_SUCCESS == result);
    (void) result;
    // Get the environmental reverb interface.
    // This could fail if the environmental reverb effect is not available,
    // either because the feature is not present, excessive CPU load, or
    // the required MODIFY_AUDIO_SETTINGS permission was not requested and granted.
    result = (*outputMixObject)->GetInterface(outputMixObject, SL_IID_ENVIRONMENTALREVERB,
                                              &outputMixEnvironmentalReverb);
    if (SL_RESULT_SUCCESS == result) {
        result = (*outputMixEnvironmentalReverb)->SetEnvironmentalReverbProperties(
                outputMixEnvironmentalReverb, &reverbSettings);
        (void) result;
    }
    // ignore unsuccessful result codes for environmental reverb, as it is optional for this example
}
// Step 2: create a buffer-queue audio player for interleaved 16-bit PCM.
// sampleRate is in Hz (converted to milliHz, which is what OpenSL expects);
// channel is 1 or 2. Fills the global player handles (bqPlayerObject,
// bqPlayerPlay, bqPlayerBufferQueue, bqPlayerEffectSend, bqPlayerVolume).
extern "C"
void createBufferQueueAudioPlayer(int sampleRate, int channel) {
    SLresult result;
    if (sampleRate >= 0) {
        bqPlayerSampleRate = sampleRate * 1000; // Hz -> milliHz
    }
    // Configure the audio source: a 2-buffer Android simple buffer queue.
    SLDataLocator_AndroidSimpleBufferQueue loc_bufq = {SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 2};
    // Defaults (mono, 8 kHz, S16LE); rate/channels overridden below from the decoder.
    SLDataFormat_PCM format_pcm = {SL_DATAFORMAT_PCM, 1, SL_SAMPLINGRATE_8,
                                   SL_PCMSAMPLEFORMAT_FIXED_16, SL_PCMSAMPLEFORMAT_FIXED_16,
                                   SL_SPEAKER_FRONT_CENTER, SL_BYTEORDER_LITTLEENDIAN};
    if (bqPlayerSampleRate) {
        format_pcm.samplesPerSec = bqPlayerSampleRate; // sample rate in milliHz
    }
    format_pcm.numChannels = (SLuint32) channel;
    if (channel == 2) {
        format_pcm.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
    } else {
        format_pcm.channelMask = SL_SPEAKER_FRONT_CENTER;
    }
    SLDataSource audioSrc = {&loc_bufq, &format_pcm};
    // Configure the audio sink: the output mix created in createEngine().
    SLDataLocator_OutputMix loc_outmix = {SL_DATALOCATOR_OUTPUTMIX, outputMixObject};
    SLDataSink audioSnk = {&loc_outmix, NULL};
    /*
     * create audio player:
     * fast audio does not support when SL_IID_EFFECTSEND is required, skip it
     * for fast audio case
     */
    const SLInterfaceID ids[3] = {SL_IID_BUFFERQUEUE, SL_IID_VOLUME, SL_IID_EFFECTSEND,
            /*SL_IID_MUTESOLO,*/};
    const SLboolean req[3] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE,
            /*SL_BOOLEAN_TRUE,*/ };
    // Create the player; only 2 interfaces (no EFFECTSEND) on the fast-audio path.
    result = (*engineEngine)->CreateAudioPlayer(engineEngine, &bqPlayerObject, &audioSrc, &audioSnk,
                                                bqPlayerSampleRate ? 2 : 3, ids, req);
    assert(SL_RESULT_SUCCESS == result);
    (void) result;
    // Realize the player.
    result = (*bqPlayerObject)->Realize(bqPlayerObject, SL_BOOLEAN_FALSE);
    assert(SL_RESULT_SUCCESS == result);
    (void) result;
    // Get the play interface.
    result = (*bqPlayerObject)->GetInterface(bqPlayerObject, SL_IID_PLAY, &bqPlayerPlay);
    assert(SL_RESULT_SUCCESS == result);
    (void) result;
    // Get the buffer queue interface.
    result = (*bqPlayerObject)->GetInterface(bqPlayerObject, SL_IID_BUFFERQUEUE,
                                             &bqPlayerBufferQueue);
    assert(SL_RESULT_SUCCESS == result);
    (void) result;
    // Register the callback invoked each time a queued buffer finishes playing.
    result = (*bqPlayerBufferQueue)->RegisterCallback(bqPlayerBufferQueue, bqPlayerCallback, NULL);
    assert(SL_RESULT_SUCCESS == result);
    (void) result;
    // Get the effect-send interface (not available on the fast-audio path).
    bqPlayerEffectSend = NULL;
    if (0 == bqPlayerSampleRate) {
        result = (*bqPlayerObject)->GetInterface(bqPlayerObject, SL_IID_EFFECTSEND,
                                                 &bqPlayerEffectSend);
        assert(SL_RESULT_SUCCESS == result);
        (void) result;
    }
#if 0 // mute/solo is not supported for sources that are known to be mono, as this is
    // get the mute/solo interface
    result = (*bqPlayerObject)->GetInterface(bqPlayerObject, SL_IID_MUTESOLO, &bqPlayerMuteSolo);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;
#endif
    // Get the volume interface.
    result = (*bqPlayerObject)->GetInterface(bqPlayerObject, SL_IID_VOLUME, &bqPlayerVolume);
    assert(SL_RESULT_SUCCESS == result);
    (void) result;
    // Register the play-event callback (fires on head-at-end, see mask below).
    result = (*bqPlayerPlay)->RegisterCallback(bqPlayerPlay, playOverEvent, NULL);
    assert(SL_RESULT_SUCCESS == result);
    (void) result;
    // Only deliver the "play head reached end" event.
    result = (*bqPlayerPlay)->SetCallbackEventsMask(bqPlayerPlay, SL_PLAYEVENT_HEADATEND);
    assert(SL_RESULT_SUCCESS == result);
    (void) result;
    // NOTE(review): despite the original "start playing" comment, the player is
    // created in the PAUSED state; playback presumably starts when the Java
    // layer calls pause(false) — confirm against the native pause() impl.
    result = (*bqPlayerPlay)->SetPlayState(bqPlayerPlay, SL_PLAYSTATE_PAUSED);
    assert(SL_RESULT_SUCCESS == result);
    (void) result;
}
// Release the resample buffer used by the fast-audio path.
// When bqPlayerSampleRate is 0 the fast path was never taken and no buffer
// was ever allocated, so there is nothing to free.
void releaseResampleBuf(void) {
    if (bqPlayerSampleRate != 0) {
        free(resampleBuf);
        resampleBuf = NULL; // defend against use-after-free / double free
    }
}
// This callback handler is called every time a buffer finishes playing.
// It decodes the next chunk via getPCM() and re-enqueues it. audioEngineLock
// is released on every path that stops feeding the queue, so the producer
// side can start the next track.
extern "C"
void bqPlayerCallback(SLAndroidSimpleBufferQueueItf bq, void *context) {
    assert(bq == bqPlayerBufferQueue);
    assert(NULL == context);
    // for streaming playback, replace this test by logic to find and fill the next buffer
    if (getPCM() < 0) { // end of stream or decode failure: stop feeding the queue
        pthread_mutex_unlock(&audioEngineLock);
        return;
    }
    if (NULL != nextBuffer && 0 != nextSize) {
        SLresult result;
        // enqueue another buffer
        result = (*bqPlayerBufferQueue)->Enqueue(bqPlayerBufferQueue, nextBuffer, nextSize);
        // the most likely other result is SL_RESULT_BUFFER_INSUFFICIENT,
        // which for this code example would indicate a programming error
        if (SL_RESULT_SUCCESS != result) {
            pthread_mutex_unlock(&audioEngineLock);
        }
        (void) result;
    } else {
        releaseResampleBuf();
        pthread_mutex_unlock(&audioEngineLock);
    }
}
基本就这三步,在我理解里,其实opensl就是一个播放器的api,我们这里也就是简单调用封装好的方法而已。
播放音频当然还需要数据,opensl本身可以读asset、uri的音频数据、pcm数据,我这里因为是为了后面音视频做铺垫,所以加入了ffmpeg来解码音频数据,得到pcm数据后扔到opensl缓存队列就可以了。
具体的解码过程如下:
/**
 * JNI entry point: open the audio file at `url`, set up the FFmpeg demuxer,
 * decoder and resampler, create the OpenSL engine/player, and prime the
 * buffer queue once to start the decode/enqueue loop.
 *
 * Returns 0 on success, -1 on any failure.
 * Globals written: pFormatCtx, pCodecCtx, pFrame, swr, outputBuffer,
 * audioindex, iTotalSeconds, isEnd.
 */
extern "C"
int Java_com_lake_ndkaudiotest_MainActivity_play(JNIEnv *env, jobject thiz, jstring url) {
    isEnd = false;
    int i;
    AVCodec *pCodec;
    // Copy of the Java path string. The original code never called
    // ReleaseStringUTFChars, leaking the copy on every call.
    const char *file_name = env->GetStringUTFChars(url, NULL);
    if (file_name == NULL) {
        return -1; // OutOfMemoryError already pending in the JVM
    }
    LOGI("file_name:%s\n", file_name);
    // Register all formats and codecs (no-op on repeated calls).
    av_register_all();
    // Allocate the demuxer context.
    pFormatCtx = avformat_alloc_context();
    // Open the input file.
    if (avformat_open_input(&pFormatCtx, file_name, NULL, NULL) != 0) {
        LOGE("Couldn't open input stream.\n");
        env->ReleaseStringUTFChars(url, file_name);
        return -1;
    }
    // The demuxer has its own copy of the path now; release the JNI string.
    env->ReleaseStringUTFChars(url, file_name);
    // Probe stream information.
    if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
        LOGE("Couldn't find stream information.\n");
        return -1;
    }
    // Locate the first audio stream.
    audioindex = -1;
    for (i = 0; i < pFormatCtx->nb_streams; i++) {
        if (pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            audioindex = i;
            break;
        }
    }
    if (audioindex == -1) {
        LOGE("Couldn't find an audio stream.\n");
        return -1;
    }
    // duration is in AV_TIME_BASE (microsecond) units.
    iTotalSeconds = (int) pFormatCtx->duration / 1000000;
    // Find and open the decoder for the audio stream.
    AVCodecParameters *pCodecPar = pFormatCtx->streams[audioindex]->codecpar;
    pCodec = avcodec_find_decoder(pCodecPar->codec_id);
    assert(pCodec != NULL);
    pCodecCtx = avcodec_alloc_context3(pCodec);
    // avcodec_parameters_to_context() returns >= 0 on success, so the
    // original `!= 0` test could reject a successful call. Also log via
    // LOGE — fprintf(stderr, ...) is invisible on Android.
    if (avcodec_parameters_to_context(pCodecCtx, pCodecPar) < 0) {
        LOGE("Couldn't copy codec context");
        return -1;
    }
    // Open the decoder.
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
        LOGE("Couldn't open codec.\n");
        return -1;
    }
    // Frame that will receive decoded (raw) audio.
    pFrame = av_frame_alloc();
    // Configure the resampler: source layout/format -> interleaved S16 at the
    // same sample rate and channel layout. (The original code called
    // swr_alloc() first and then overwrote the pointer with
    // swr_alloc_set_opts(NULL, ...), leaking one context, and called
    // swr_init() a second time after a successful init.)
    swr = swr_alloc_set_opts(NULL,
                             pCodecCtx->channel_layout,
                             AV_SAMPLE_FMT_S16,
                             pCodecCtx->sample_rate,
                             pCodecCtx->channel_layout,
                             pCodecCtx->sample_fmt,
                             pCodecCtx->sample_rate,
                             0, NULL);
    if (!swr || swr_init(swr) < 0) {
        swr_free(&swr);
        return -1;
    }
    // Output buffer for one converted frame of PCM.
    // NOTE(review): 8192 bytes assumes nb_samples * channels * 2 never exceeds
    // this for any frame; larger frames would need a bigger buffer — confirm.
    int outputBufferSize = 8192;
    outputBuffer = (uint8_t *) malloc(sizeof(uint8_t) * outputBufferSize);
    if (outputBuffer == NULL) {
        return -1;
    }
    // Build the OpenSL engine and the buffer-queue player.
    createEngine();
    createBufferQueueAudioPlayer(pCodecCtx->sample_rate, pCodecCtx->channels);
    // Prime the queue: invoke the callback once by hand to enqueue the first
    // decoded buffer; subsequent buffers are enqueued by OpenSL's callback.
    bqPlayerCallback(bqPlayerBufferQueue, NULL);
    return 0;
}
opensl获取pcm数据方法
/**
* 读取pcm数据
* @return
*/
int getPCM() {
while (av_read_frame(pFormatCtx, &packet) >= 0) {
if (packet.stream_index == audioindex) {
int ret = avcodec_send_packet(pCodecCtx, &packet);
timestamp = packet.pts * av_q2d(pFormatCtx->streams[audioindex]->time_base);
if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
break;
ret = avcodec_receive_frame(pCodecCtx, pFrame);
if (ret < 0 && ret != AVERROR_EOF)
break;
//处理不同的格式
if (pCodecCtx->sample_fmt == AV_SAMPLE_FMT_S16P) {
nextSize = av_samples_get_buffer_size(pFrame->linesize, pCodecCtx->channels,
pCodecCtx->frame_size, pCodecCtx->sample_fmt,
1);
} else {
av_samples_get_buffer_size(&nextSize, pCodecCtx->channels, pCodecCtx->frame_size,
pCodecCtx->sample_fmt, 1);
}
// 音频格式转换
swr_convert(swr, &outputBuffer, pFrame->nb_samples,
(uint8_t const **) (pFrame->extended_data),
pFrame->nb_samples);
nextBuffer = outputBuffer;
av_packet_unref(&packet);
return 0;
}
av_packet_unref(&packet);//packet不用了一定要释放掉内存,网上很多例子的内存溢出就是因为没有释放packet的内存。
}
LOGI("getPCM_shutdown");
return -1;
}
有了以上方法,例子参考,基本播放音频是没有问题了。其余的问题都是看个人需求了,我这里就把我的做法分享一下,由于本人c不是太好,虽然实现了,但是从架构上不清楚是不是这样做合不合理,仅供大家参考。
我先说一下我的需求,c层音频播放出来了,但是在java层,我想显示时间,还有控制音频的播放/暂停,以及seek拖动控制,播放完成自动跳下一首。
一开始,我试了一下opensl本身seek功能,结果获取seek接口就失败了,不得不从ffmpeg解码层考虑seek功能,还好,找到了ffmpeg的seek功能,实现起来完全没有问题,后面的视频seek可以参考一下。
其实就是调用以下方法
// Target position in stream time_base units (seekTime is in seconds).
int64_t seek_pos = (int64_t) (seekTime /av_q2d(pFormatCtx->streams[audioindex]->time_base));
// Seek the demuxer; AVSEEK_FLAG_BACKWARD lands on the nearest preceding
// keyframe. Audio seeks are effectively instantaneous.
if (av_seek_frame(pFormatCtx, audioindex, seek_pos, AVSEEK_FLAG_BACKWARD) < 0) {
    LOGE("%s, av_seek_frame() seek to %.3f failed!", __FUNCTION__,(double) seek_pos / AV_TIME_BASE);
    return -2;
}
// Drop buffered decoder state so stale samples are not played after the seek.
avcodec_flush_buffers(pCodecCtx);
时间位置获取也非常简单,根据packet获取就可以了
// Current playback time in seconds: pts scaled by the stream's time_base.
timestamp = packet.pts * av_q2d(pFormatCtx->streams[audioindex]->time_base);
然后java层来调用以下c层实现的seek方法就可以了
接下来是时间的回调。我本想直接在OpenSL的回调函数里调用java层的方法,但回调触发到第二遍时就会崩溃,猜测与JNIEnv不能跨线程使用有关。折腾了半天没有解决,只好另起一个线程,用一个while循环去监听需要回调的数据,在这个循环里调用java的方法就不会崩溃。所以直接在OpenSL回调方法里调用java层方法感觉并不可行,不确定有没有别的实现方式。
// JNI entry point run on a dedicated Java thread: polls the globals written by
// the decoder (timestamp, iTotalSeconds, isEnd) and forwards changes to the
// Java callbacks showTime(int), setToatalTime(int) and isPlayEnd(boolean).
// NOTE(review): the loop never exits, so the Java thread lives for the whole
// process. The author polls here because calling back into Java directly from
// the OpenSL callback thread crashed — presumably because that thread's
// JNIEnv is not attached/valid there; confirm before restructuring.
extern "C"
void Java_com_lake_ndkaudiotest_MainActivity_showtime(JNIEnv *env, jobject thiz) {
    int seconds = -1;        // last position reported to Java
    int totalSeconds = -1;   // last duration reported to Java
    bool end = false;        // last end-of-playback state reported to Java
    jclass jclazz = env->GetObjectClass(thiz);
    jmethodID jmethodIDS = env->GetMethodID(jclazz, "showTime", "(I)V");
    jmethodID jmethodIDT = env->GetMethodID(jclazz, "setToatalTime", "(I)V");
    jmethodID jmethodIDE = env->GetMethodID(jclazz, "isPlayEnd", "(Z)V");
    // make sure the asset audio player was created
    // seek
    while (true) {
        if (timestamp != -1) { // report the current playback time to Java
            if (seconds != timestamp) {
                seconds = timestamp;
                env->CallVoidMethod(thiz, jmethodIDS, (jint) timestamp);
            }
        }
        if (iTotalSeconds != -1) { // report the total duration to Java
            if (totalSeconds != iTotalSeconds) {
                totalSeconds = iTotalSeconds;
                env->CallVoidMethod(thiz, jmethodIDT, (jint) iTotalSeconds);
            }
        }
        if (isEnd != end) { // tell the Java layer that playback finished
            end = isEnd;
            env->CallVoidMethod(thiz, jmethodIDE, (jboolean) isEnd);
        }
        usleep(100000); // sleep 0.1 s so this busy loop doesn't hog the CPU
    }
}
这里利用了监听全局变量,只要一变化就调用java层方法。
java层根据传过来的参数再进行其他处理。
java层代码:
package com.lake.ndkaudiotest;
import android.content.Context;
import android.graphics.Color;
import android.os.Environment;
import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.AdapterView;
import android.widget.BaseAdapter;
import android.widget.Button;
import android.widget.ListView;
import android.widget.SeekBar;
import android.widget.TextView;
import java.io.File;
public class MainActivity extends AppCompatActivity implements View.OnClickListener {
private SeekBar mSeekBar;
private Thread timeThread;
private int mProgress;//播放进度
private ListView listview;
private TextView tVTime;//当前时间
private TextView tVName;//名称
private TextView tTTime;//总时间
private int toTalTime;//总时间
private Button mBtnPlayOrPause;
private Button mBtnLast;
private Button mBtnNext;
private String inputurl;//文件路径
boolean isFirst = true;
private int curItem = 0;//当前序号
boolean playing = false;//播放状态
private int length = 0;//列表长度
// Used to load the 'native-lib' library on application startup.
static {
System.loadLibrary("native-lib");
}
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
initView();
mSeekBar.setOnSeekBarChangeListener(new SeekBar.OnSeekBarChangeListener() {
@Override
public void onProgressChanged(SeekBar seekBar, int progress, boolean fromUser) {
Log.e("lake", "onProgressChanged: " + progress);
mProgress = progress;
}
@Override
public void onStartTrackingTouch(SeekBar seekBar) {
pause(true);
}
@Override
public void onStopTrackingTouch(SeekBar seekBar) {
seek(mProgress);
}
});
}
/**
* 界面初始化
*/
private void initView() {
listview = findViewById(R.id.listview);
tVName = findViewById(R.id.filename);
tVTime = findViewById(R.id.showtime);
tTTime = findViewById(R.id.totaltime);
mSeekBar = findViewById(R.id.seekbar);
mBtnPlayOrPause = findViewById(R.id.playorpause);
mBtnLast = findViewById(R.id.last);
mBtnNext = findViewById(R.id.next);
mBtnPlayOrPause.setOnClickListener(this);
mBtnLast.setOnClickListener(this);
mBtnNext.setOnClickListener(this);
final String folderurl = Environment.getExternalStorageDirectory().getPath();
final File[] files = new File(folderurl + "/MyLocalPlayer").listFiles();
length = files.length;
final ListFileAdapter myListAdapter = new ListFileAdapter(this, files);
listview.setAdapter(myListAdapter);
listview.setOnItemClickListener(new AdapterView.OnItemClickListener() {
@Override
public void onItemClick(AdapterView> parent, View view, int position, long id) {
curItem = position;
myListAdapter.setSelectItem(position);
myListAdapter.notifyDataSetInvalidated();
inputurl = folderurl + "/MyLocalPlayer/" + files[position].getName();
tVName.setText(files[position].getName().substring(0,files[position].getName().length()-4));
if (!isFirst) {
stop();
mSeekBar.setProgress(0);
}
play(inputurl);
if (isFirst) {
isFirst = false;
timeThread = new Thread(new Runnable() {
@Override
public void run() {
showtime();
}
});
timeThread.start();
}
pause(!playing);
}
});
clickListItem(curItem);
}
@Override
public void onClick(View v) {
switch (v.getId()) {
case R.id.playorpause://播放或者暂停
pause(playing);
mBtnPlayOrPause.setText(playing ? "Play" : "Pause");
playing = !playing;
break;
case R.id.last: {//上一首
int item = (curItem - 1) < 0 ? length-1 : curItem - 1;
clickListItem(item);
break;
}
case R.id.next: {//下一首
int item = (curItem + 1) >= length ? 0 : curItem + 1;
clickListItem(item);
break;
}
default:
break;
}
}
/**
* 点击列表
* @param position 第几项
*/
public void clickListItem(int position){
AdapterView.OnItemClickListener onItemClickListener = listview.getOnItemClickListener();
if (onItemClickListener != null) {
onItemClickListener.onItemClick(listview, null, position, position);
listview.setSelection(position);
}
}
/**
* 关闭播放器
*/
public void shutdown() {
stop();
mSeekBar.setProgress(0);
play(inputurl);
}
/**
* 显示实时进度时间
*
* @param time
*/
public void showTime(final int time) {
final String n = resetTimeInt(time / 3600) + ":" + resetTimeInt(time % 3600 / 60) + ":" + resetTimeInt(time % 60);
runOnUiThread(new Runnable() {
@Override
public void run() {
tVTime.setText(n);
mSeekBar.setProgress(time);
}
});
Log.e("lake", "showTime: " + n);
}
/**
* 设置总时间
*
* @param total
*/
public void setToatalTime(int total) {
toTalTime = total;
mSeekBar.setMax(total);
Log.e("lake", "toTalTime: " + toTalTime);
final String t = resetTimeInt(total / 3600) + ":" + resetTimeInt(total % 3600 / 60) + ":" + resetTimeInt(total % 60);
runOnUiThread(new Runnable() {
@Override
public void run() {
tTTime.setText(t);
}
});
}
/**
* 播放结束
*
* @param isEnd
*/
public void isPlayEnd(boolean isEnd) {
Log.e("lake", "isPlayEnd: " + isEnd);
if (isEnd) {
runOnUiThread(new Runnable() {
@Override
public void run() {
int item = (curItem + 1) >= length ? 0 : curItem + 1;
clickListItem(item);
}
});
}
}
public native void play(String url);
public native void stop();
public native void pause(boolean play);
public native void seek(int seekTime);
public native void showtime();
public String resetTimeInt(int time) {
if (time < 10) {
return "0" + time;
} else {
return time + "";
}
}
class ListFileAdapter extends BaseAdapter {
private Context context;
private File[] files;
public ListFileAdapter(Context context, File[] files) {
this.context = context;
this.files = files;
}
@Override
public int getCount() {
return files.length;
}
@Override
public Object getItem(int position) {
return files[position];
}
@Override
public long getItemId(int position) {
return position;
}
@Override
public View getView(int position, View convertView, ViewGroup parent) {
ViewHolder viewHolder = null;
if (convertView == null) {
viewHolder = new ViewHolder();
convertView = LayoutInflater.from(context).inflate(R.layout.list_item, null);
viewHolder.mTextView = (TextView) convertView.findViewById(R.id.filename);
convertView.setTag(viewHolder);
} else {
viewHolder = (ViewHolder) convertView.getTag();
}
viewHolder.mTextView.setText(files[position].getName());
if (position == selectItem) {
convertView.setBackgroundColor(Color.GRAY);
} else {
convertView.setBackgroundColor(Color.WHITE);
}
return convertView;
}
class ViewHolder {
TextView mTextView;
}
public void setSelectItem(int selectItem) {
this.selectItem = selectItem;
}
private int selectItem = -1;
}
}
大概就是这样。
实现的demo下载地址:https://github.com/lakehubo/NDKAudioTest