This post continues my notes on audio development on Android.
The example records audio with AudioRecord, plays it back with AudioTrack, and stores the data as a raw .pcm file.
It is only a simple test case; the UI is the same as in the previous post.
Remember to add the required permissions.
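The permissions involved are android.permission.RECORD_AUDIO and android.permission.WRITE_EXTERNAL_STORAGE: declare them in AndroidManifest.xml, and on Android 6.0+ also request them at runtime, as the checkPermission()/requestPermission() code further down does.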
1. The AudioRecord workflow:
--- Start capturing.
--- Use a dedicated thread to keep "reading" audio data out of AudioRecord's buffer. This has to happen promptly: otherwise an "overrun" error occurs, which is common in audio development and means the application did not take the audio data away in time, so the internal audio buffer overflowed.
--- Stop capturing and release the resources (a minimal sketch of these three steps follows this list).
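A rough sketch of the three steps, assuming the AudioRecord has already been constructed as described below, that isRecording is a volatile flag, and that writePcm() is a hypothetical sink (file, encoder, network):

void captureLoop(AudioRecord record, int bufferSize) {
    record.startRecording();                       // 1. start capturing
    byte[] buffer = new byte[bufferSize];
    while (isRecording) {                          // 2. keep "reading" promptly, or overrun occurs
        int read = record.read(buffer, 0, buffer.length);
        if (read > 0) {
            writePcm(buffer, read);                // hand the PCM data to a file / encoder
        }
    }
    record.stop();                                 // 3. stop capturing...
    record.release();                              // ...and release the resources
}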
Parameter configuration:
public AudioRecord(int audioSource, int sampleRateInHz, int channelConfig, int audioFormat, int bufferSizeInBytes) throws IllegalArgumentException
--- audioSource
The audio source; the usual choice, and the one used in the demo below, is MediaRecorder.AudioSource.MIC (the device microphone).
--- sampleRateInHz
The sample rate in Hz; the demo below uses 44100 Hz, the rate that is guaranteed to work on all devices.
--- channelConfig
The channel configuration. The values are defined as constants in the AudioFormat class, commonly CHANNEL_IN_MONO (one channel) and CHANNEL_IN_STEREO (two channels).
--- audioFormat
This parameter configures the sample format ("bit depth"). The values are also constants in the AudioFormat class, commonly ENCODING_PCM_16BIT (16 bit) and ENCODING_PCM_8BIT (8 bit); note that only the former is guaranteed to be compatible with all Android phones.
--- bufferSizeInBytes
This is the hardest to understand yet most important parameter: it sets the size of AudioRecord's internal audio buffer. The buffer must not be smaller than one audio frame (Frame), whose size is computed as:
int size = sampleRate x bitDepth (in bytes) x frameDuration (in seconds) x channelCount
The frame duration is usually between 2.5 ms and 120 ms and is decided by the vendor or the specific application. It follows that the shorter each frame, the lower the latency, but also the more fragmented the data.
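For example, a 20 ms frame at 44100 Hz, 16-bit (2 bytes per sample), mono works out to 44100 x 2 x 0.02 x 1 = 1764 bytes.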
In Android development, the AudioRecord class provides a helper function for determining this bufferSizeInBytes; its prototype is:
int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat);
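For example (44100 Hz, mono, 16-bit, matching the demo code further down; passing the returned minimum, or a small multiple of it, to the constructor is safe):

int minBufferSize = AudioRecord.getMinBufferSize(44100,
        AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
AudioRecord record = new AudioRecord(MediaRecorder.AudioSource.MIC, 44100,
        AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, minBufferSize);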
2. AudioTrack parameter configuration:
public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat, int bufferSizeInBytes, int mode) throws IllegalArgumentException
--- streamType
The audio stream type, chosen from the STREAM_* constants in AudioManager (STREAM_MUSIC, STREAM_VOICE_CALL, STREAM_RING, STREAM_ALARM, ...); the example below uses AudioManager.STREAM_MUSIC.
--- sampleRateInHz
The sample rate. The "audioParamCheck" function in the AudioTrack source shows that it must lie between 4000 Hz and 192000 Hz.
--- channelConfig
The channel configuration. For playback the values come from the CHANNEL_OUT_* constants in AudioFormat, commonly CHANNEL_OUT_MONO (one channel) and CHANNEL_OUT_STEREO (two channels); note these are not the CHANNEL_IN_* constants used when recording.
--- audioFormat
This parameter configures the sample format ("bit depth"). The values are constants in the AudioFormat class, commonly ENCODING_PCM_16BIT (16 bit) and ENCODING_PCM_8BIT (8 bit); only the former is guaranteed to be compatible with all Android phones.
--- bufferSizeInBytes
This sets the size of AudioTrack's internal audio buffer. It must not be smaller than one audio frame (Frame), computed as above:
int size = sampleRate x bitDepth (in bytes) x frameDuration (in seconds) x channelCount
The AudioTrack class also provides a helper function for determining bufferSizeInBytes; its prototype is:
int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat);
--- mode
The playback mode. The AudioTrack class defines two: MODE_STATIC and MODE_STREAM.
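MODE_STREAM is what the demo below uses: data is written to the track continuously while it plays, which suits long or generated audio. MODE_STATIC copies the whole clip into the track's buffer once before playback and suits short sound effects. A rough sketch of MODE_STATIC (the pcmData array holding the complete clip is an assumption for illustration):

byte[] pcmData = loadShortClipSomehow();   // hypothetical: the complete clip as 16-bit mono PCM
AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC, 44100,
        AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT,
        pcmData.length, AudioTrack.MODE_STATIC);
track.write(pcmData, 0, pcmData.length);   // load the whole clip first
track.play();                              // then start playback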
import android.content.pm.PackageManager;
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioRecord;
import android.media.AudioTrack;
import android.media.MediaRecorder;
import android.os.AsyncTask;
import android.os.Environment;
import android.support.v4.app.ActivityCompat;
import android.support.v4.content.ContextCompat;
import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import android.util.Log;
import android.view.View;
import android.widget.Button;
import android.widget.Toast;
import com.cl.slack.mediarecorder.R;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import static android.Manifest.permission.RECORD_AUDIO;
import static android.Manifest.permission.WRITE_EXTERNAL_STORAGE;
public class AudioRecorderActivity extends AppCompatActivity
implements View.OnClickListener {
private final int REQ_PERMISSION_AUDIO = 0x01;
private Button mRecode, mPlay;
private File mAudioFile = null;
private Thread mCaptureThread = null;
private boolean mIsRecording, mIsPlaying;
private int mFrequence = 44100;
private int mChannelConfig = AudioFormat.CHANNEL_IN_MONO;
// playback must use a CHANNEL_OUT_* constant and match the recorded (mono) data
private int mPlayChannelConfig = AudioFormat.CHANNEL_OUT_MONO;
private int mAudioEncoding = AudioFormat.ENCODING_PCM_16BIT;
private PlayTask mPlayer;
private RecordTask mRecorder;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_media_recorder_audio);
mRecode = (Button) findViewById(R.id.audio_recode);
mPlay = (Button) findViewById(R.id.audio_paly);
mRecode.setText("record");
mPlay.setText("play");
mPlay.setEnabled(false);
mRecode.setOnClickListener(this);
mPlay.setOnClickListener(this);
// mRecorder = new RecordTask();
// mPlayer = new PlayTask();
}
@Override
public void onClick(View view) {
switch (view.getId()) {
case R.id.audio_recode:
if (mRecode.getTag() == null) {
startAudioRecode();
} else {
stopAudioRecode();
}
break;
case R.id.audio_paly:
if (mPlay.getTag() == null) {
startAudioPlay();
} else {
stopAudioPlay();
}
break;
}
}
private void startAudioRecode() {
if (checkPermission()) {
PackageManager packageManager = this.getPackageManager();
if (!packageManager.hasSystemFeature(PackageManager.FEATURE_MICROPHONE)) {
showToast("This device doesn't have a mic!");
return;
}
mRecode.setTag(this);
mRecode.setText("stop");
mPlay.setEnabled(false);
File fpath = new File(Environment.getExternalStorageDirectory()
.getAbsolutePath() + "/slack");
fpath.mkdirs(); // create the output folder if it does not exist
try {
// create a temporary file; note the .pcm extension (raw PCM data)
mAudioFile = File.createTempFile("recording", ".pcm", fpath);
} catch (IOException e) {
e.printStackTrace();
}
mRecorder = new RecordTask();
mRecorder.execute();
showToast("Recording started");
} else {
requestPermission();
}
}
private void stopAudioRecode() {
mIsRecording = false;
mRecode.setTag(null);
mRecode.setText("record");
mPlay.setEnabled(true);
showToast("Recording Completed");
}
private void startAudioPlay() {
mPlay.setTag(this);
mPlay.setText("stop");
mPlayer = new PlayTask();
mPlayer.execute();
showToast("Playing recording");
}
private void stopAudioPlay() {
mIsPlaying = false;
mPlay.setTag(null);
mPlay.setText("play");
}
private boolean checkPermission() {
int result = ContextCompat.checkSelfPermission(getApplicationContext(),
WRITE_EXTERNAL_STORAGE);
int result1 = ContextCompat.checkSelfPermission(getApplicationContext(),
RECORD_AUDIO);
return result == PackageManager.PERMISSION_GRANTED &&
result1 == PackageManager.PERMISSION_GRANTED;
}
private void requestPermission() {
ActivityCompat.requestPermissions(this, new
String[]{WRITE_EXTERNAL_STORAGE, RECORD_AUDIO}, REQ_PERMISSION_AUDIO);
}
@Override
public void onRequestPermissionsResult(int requestCode,
String permissions[], int[] grantResults) {
switch (requestCode) {
case REQ_PERMISSION_AUDIO:
if (grantResults.length > 0) {
boolean StoragePermission = grantResults[0] ==
PackageManager.PERMISSION_GRANTED;
boolean RecordPermission = grantResults[1] ==
PackageManager.PERMISSION_GRANTED;
if (StoragePermission && RecordPermission) {
showToast("Permission Granted");
} else {
showToast("Permission Denied");
}
}
break;
}
}
private void showToast(String message) {
Toast.makeText(this, message, Toast.LENGTH_LONG).show();
}
class RecordTask extends AsyncTask<Void, Integer, Void> {
@Override
protected Void doInBackground(Void... arg0) {
mIsRecording = true;
try {
// open an output stream to the target file
DataOutputStream dos = new DataOutputStream(
new BufferedOutputStream(
new FileOutputStream(mAudioFile)));
// query a suitable buffer size for the configured parameters
int bufferSize = AudioRecord.getMinBufferSize(mFrequence,
mChannelConfig, mAudioEncoding);
// instantiate the AudioRecord
AudioRecord record = new AudioRecord(
MediaRecorder.AudioSource.MIC, mFrequence,
mChannelConfig, mAudioEncoding, bufferSize);
// allocate the read buffer (bufferSize is in bytes, so a short[] of this length is generously sized)
short[] buffer = new short[bufferSize];
// start capturing
record.startRecording();
int r = 0; // progress counter
// keep capturing while mIsRecording is true
while (mIsRecording) {
// read samples from the AudioRecord; returns the number of shorts actually read
int bufferReadResult = record
.read(buffer, 0, buffer.length);
// write the captured samples to the output stream
for (int i = 0; i < bufferReadResult; i++) {
dos.writeShort(buffer[i]);
}
publishProgress(r); // report current progress to the UI thread
r++;
}
// capture finished: stop and release the recorder
record.stop();
record.release();
Log.i("slack", "::" + mAudioFile.length());
dos.close();
} catch (Exception e) {
Log.e("slack", "::" + e.getMessage());
}
return null;
}
// invoked on the UI thread each time publishProgress() is called in doInBackground()
protected void onProgressUpdate(Integer... progress) {
//
}
protected void onPostExecute(Void result) {
}
}
/**
* AudioTrack
*/
class PlayTask extends AsyncTask<Void, Void, Void> {
@Override
protected Void doInBackground(Void... arg0) {
mIsPlaying = true;
int bufferSize = AudioTrack.getMinBufferSize(mFrequence,
mPlayChannelConfig, mAudioEncoding);
short[] buffer = new short[bufferSize];
try {
// open an input stream on the recorded file; its data is fed to the AudioTrack for playback
DataInputStream dis = new DataInputStream(
new BufferedInputStream(new FileInputStream(mAudioFile)));
// instantiate the AudioTrack
AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC,
mFrequence,
mPlayChannelConfig, mAudioEncoding, bufferSize,
AudioTrack.MODE_STREAM);
// start playback
track.play();
// in MODE_STREAM the track plays while we keep feeding it, so read and write in a loop
while (mIsPlaying && dis.available() > 0) {
int i = 0;
while (dis.available() > 0 && i < buffer.length) {
buffer[i] = dis.readShort();
i++;
}
// write only the samples that were actually read to the AudioTrack
track.write(buffer, 0, i);
}
// playback finished: stop and release the track
track.stop();
track.release();
dis.close();
} catch (Exception e) {
Log.e("slack", "error:" + e.getMessage());
}
return null;
}
protected void onPostExecute(Void result) {
}
protected void onPreExecute() {
}
}
}
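One more note on the .pcm file: DataOutputStream.writeShort() stores samples big-endian and DataInputStream.readShort() reads them back the same way, so recording and playback match within this demo, but standard PCM/WAV tools expect little-endian 16-bit samples, so the file would need byte-swapping (or a WAV header plus conversion) before it can be opened elsewhere.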