章节
Camera开发系列之一-显示摄像头实时画面
Camera开发系列之二-相机预览数据回调
Camera开发系列之三-相机数据硬编码为h264
Camera开发系列之四-使用MediaMuxer封装编码后的音视频到mp4容器
Camera开发系列之五-使用MediaExtractor制作一个简易播放器
Camera开发系列之六-使用mina框架实现视频推流
Camera开发系列之七-使用GLSurfaceView绘制Camera预览画面
简介
一个音视频文件是由音频和视频组成的,我们可以通过MediaExtractor把音频或视频给单独抽取出来,抽取出来的音频和视频能单独播放。
主要方法有下面几个:
- setDataSource(String path):可以设置本地文件和网络文件路径
- getTrackCount():得到文件的通道数(即音视频通道)
- getTrackFormat(int index):获取指定(index)的通道格式,即MediaFormat,通过MediaFormat来获取track的详细信息,如:MimeType、分辨率、采样频率、帧率等等
- getSampleTime():返回当前的时间戳
- readSampleData(ByteBuffer byteBuf, int offset):把指定通道中的数据按偏移量读取到ByteBuffer中;然后就可以将数据给MediaCodec进行处理
- advance():读取下一帧数据
- release(): 读取结束后释放资源
解码流程
使用MediaExtractor可以分离出来音视频数据,那么如何使用MediaCodec 来播放解码出来的数据,实现一个视频播放器呢?我觉得需要如下几步就可以实现:
- 创建视频解码线程,MediaExtractor解析出视频数据,拿给MediaCodec 处理后,渲染到surface上显示
- 创建音频解码线程,MediaExtractor解析出音频数据,拿给MediaCodec 解码为原始音频数据,交由AudioTrack进行播放
- 解码播放延时同步,这里的同步是以外部时钟为基准,音视频播放哪一个快了,就等待。
因为之前一直在介绍MediaCodec ,相信大家对它的使用都不陌生了,MediaExtractor的使用也很简单,这里直接贴上MediaExtractor解析视频数据的代码:
private Surface mSurface; // render target for decoded video frames
private MediaCodec mCodec; // video decoder, created once a video track is found
// NOTE(review): both flags are read from the decode thread but lack volatile
// (the audio-side snippet below declares them volatile) — confirm and align.
private static boolean isDecoding = false;
private static boolean isPause = false;
/**
 * Decodes the video track of an MP4 file on a background thread and renders
 * each decoded frame onto the supplied surface.
 *
 * Fixes over the previous version:
 * - A missing file now aborts the call (the old code threw and caught its own
 *   FileNotFoundException, then kept decoding anyway).
 * - The pause branch sleeps instead of busy-spinning a CPU core.
 * - Duration uses float division (integer division truncated sub-second parts).
 * - The extractor is released in a finally block, so it is no longer leaked
 *   when setDataSource or decoding throws.
 *
 * @param MPEG_4_Path absolute path of the local MP4 file to decode
 * @param surface     surface the decoded frames are rendered to
 */
public void startDecodeFromMPEG_4(final String MPEG_4_Path, Surface surface){
    if (!new File(MPEG_4_Path).exists()){
        // Report and bail out — there is nothing to decode.
        new FileNotFoundException("MPEG_4 file not found: " + MPEG_4_Path).printStackTrace();
        return;
    }
    this.mSurface = surface;
    isDecoding = true;
    Thread mediaDecodeThread = new Thread(new Runnable() {
        @Override
        public void run() {
            // MediaExtractor separates the container's audio and video tracks.
            MediaExtractor videoExtractor = new MediaExtractor();
            try {
                videoExtractor.setDataSource(MPEG_4_Path);
                int videoTrackIndex = -1; // index of the selected video track
                // Scan all tracks and pick the first video track.
                for (int i = 0; i < videoExtractor.getTrackCount(); i++) {
                    MediaFormat format = videoExtractor.getTrackFormat(i);
                    String mime = format.getString(MediaFormat.KEY_MIME); // e.g. "video/avc"
                    if (mime.startsWith("video/")) { // found the video track
                        videoExtractor.selectTrack(i);
                        videoTrackIndex = i;
                        int width = format.getInteger(MediaFormat.KEY_WIDTH);
                        int height = format.getInteger(MediaFormat.KEY_HEIGHT);
                        // Float division: KEY_DURATION is in microseconds; integer
                        // division would truncate the fractional seconds.
                        float time = format.getLong(MediaFormat.KEY_DURATION) / 1000000f;
                        mCodec = MediaCodec.createDecoderByType(mime);
                        mCodec.configure(format, mSurface, null, 0);
                        mCodec.start();
                        if (mVideoCallBack != null){
                            mVideoCallBack.onGetVideoInfo(width, height, time);
                        }
                        break;
                    }
                }
                if (mCodec == null) {
                    // No video track found (or decoder creation failed) — avoid NPE below.
                    return;
                }
                MediaCodec.BufferInfo videoBufferInfo = new MediaCodec.BufferInfo();
                long startTimeStamp = System.currentTimeMillis(); // wall-clock decode start, used for pacing
                while (isDecoding){
                    if (isPause) {
                        // Sleep briefly instead of busy-waiting while paused.
                        try {
                            Thread.sleep(10);
                        } catch (InterruptedException e) {
                            Thread.currentThread().interrupt();
                            break;
                        }
                        continue;
                    }
                    int inputBufferIndex = mCodec.dequeueInputBuffer(-1);
                    if (inputBufferIndex >= 0) {
                        ByteBuffer inputBuffer;
                        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
                            inputBuffer = mCodec.getInputBuffer(inputBufferIndex);
                        } else {
                            inputBuffer = mCodec.getInputBuffers()[inputBufferIndex];
                        }
                        // Copy the next encoded sample into the codec's input buffer.
                        int sampleSize = videoExtractor.readSampleData(inputBuffer, 0);
                        if (sampleSize < 0) {
                            // End of stream: signal EOS and stop feeding input.
                            mCodec.queueInputBuffer(inputBufferIndex, 0, 0, 0,
                                    MediaCodec.BUFFER_FLAG_END_OF_STREAM);
                            videoExtractor.unselectTrack(videoTrackIndex);
                            break;
                        } else {
                            mCodec.queueInputBuffer(inputBufferIndex, 0, sampleSize,
                                    videoExtractor.getSampleTime(), 0);
                            videoExtractor.advance();
                        }
                    }
                    int outputBufferIndex = mCodec.dequeueOutputBuffer(videoBufferInfo, TIMEOUT_USEC);
                    while (outputBufferIndex >= 0) {
                        decodeDelay(videoBufferInfo, startTimeStamp); // pace playback to the frame's PTS
                        // render == true: the decoded frame is sent to mSurface.
                        mCodec.releaseOutputBuffer(outputBufferIndex, true);
                        outputBufferIndex = mCodec.dequeueOutputBuffer(videoBufferInfo, 0);
                    }
                }
                stopDecodeSync();
            } catch (IOException e){
                e.printStackTrace();
            } finally {
                // Release the extractor even when setDataSource or decoding throws.
                videoExtractor.release();
            }
        }
    });
    mediaDecodeThread.start();
}
/** Stops and releases the decoder if one is active; safe to call when already stopped. */
private void stopDecodeSync(){
    MediaCodec codec = mCodec;
    if (codec == null) {
        return; // nothing to tear down
    }
    codec.stop();
    codec.release();
    mCodec = null;
}
复制代码
MediaExtractor解析音频数据的代码:
// Decode-loop control flags; volatile so writes from the UI thread are
// visible to the decode thread.
private static volatile boolean isDecoding = false;
private static volatile boolean isPause = false;
private MediaCodec mCodec; // audio decoder, created once an audio track is found
private static AudioDecoder mAudioDecoder; // NOTE(review): not used in the visible code — confirm it is needed
private final String mime = "audio/mp4a-latm"; // AAC-LC; shadowed by the local `mime` in the decode loop
private MediaCodec.BufferInfo mBufferInfo;
private BufferedOutputStream outputStream; // NOTE(review): not used in the visible code — confirm
/**
 * Decodes the audio track of an MP4 file on a background thread and plays the
 * raw PCM through an AudioTrack.
 *
 * Fixes over the previous version:
 * - A missing file now aborts the call (the old code threw and caught its own
 *   FileNotFoundException, then kept decoding anyway).
 * - The pause branch sleeps instead of busy-spinning a CPU core.
 * - A null input buffer breaks out of the loop instead of returning, so the
 *   extractor and AudioTrack are still released.
 * - The local mime variable no longer shadows the `mime` field.
 * - Cleanup moved to finally so resources are released on exceptions too.
 *
 * @param MPEG_4_Path absolute path of the local MP4 file to decode
 */
public void startDecodeFromMPEG_4(final String MPEG_4_Path){
    if (!new File(MPEG_4_Path).exists()){
        // Report and bail out — there is nothing to decode.
        new FileNotFoundException("MPEG_4 file not found: " + MPEG_4_Path).printStackTrace();
        return;
    }
    isDecoding = true;
    Thread audioDecodeThread = new Thread(new Runnable() {
        @Override
        public void run() {
            // MediaExtractor separates the container's audio and video tracks.
            MediaExtractor audioExtractor = new MediaExtractor();
            AudioTrack audioTrack = null;
            try {
                audioExtractor.setDataSource(MPEG_4_Path);
                int audioExtractorTrackIndex = -1; // index of the selected audio track
                // Scan all tracks and pick the first audio track.
                for (int i = 0; i < audioExtractor.getTrackCount(); i++) {
                    MediaFormat format = audioExtractor.getTrackFormat(i);
                    // Renamed from `mime` to avoid shadowing the field of the same name.
                    String trackMime = format.getString(MediaFormat.KEY_MIME);
                    if (trackMime.startsWith("audio/")) { // found the audio track
                        audioExtractor.selectTrack(i);
                        audioExtractorTrackIndex = i;
                        int audioChannels = format.getInteger(MediaFormat.KEY_CHANNEL_COUNT);
                        int audioSampleRate = format.getInteger(MediaFormat.KEY_SAMPLE_RATE);
                        int channelConfig = (audioChannels == 1
                                ? AudioFormat.CHANNEL_OUT_MONO
                                : AudioFormat.CHANNEL_OUT_STEREO);
                        int minBufferSize = AudioTrack.getMinBufferSize(audioSampleRate,
                                channelConfig, AudioFormat.ENCODING_PCM_16BIT);
                        audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC,
                                audioSampleRate,
                                channelConfig,
                                AudioFormat.ENCODING_PCM_16BIT,
                                minBufferSize,
                                AudioTrack.MODE_STREAM);
                        audioTrack.play();
                        mCodec = MediaCodec.createDecoderByType(trackMime);
                        // No surface: we want raw PCM buffers back, not rendering.
                        mCodec.configure(format, null, null, 0);
                        mCodec.start();
                        break;
                    }
                }
                if (mCodec == null) {
                    // No audio track found (or decoder creation failed) — avoid NPE below.
                    return;
                }
                MediaCodec.BufferInfo audioBufferInfo = new MediaCodec.BufferInfo();
                ByteBuffer[] inputBuffers = mCodec.getInputBuffers();
                ByteBuffer[] outputBuffers = mCodec.getOutputBuffers();
                long startTimeStamp = System.currentTimeMillis(); // wall-clock decode start, used for pacing
                while (isDecoding){
                    if (isPause) {
                        // Sleep briefly instead of busy-waiting while paused.
                        try {
                            Thread.sleep(10);
                        } catch (InterruptedException e) {
                            Thread.currentThread().interrupt();
                            break;
                        }
                        continue;
                    }
                    int inputBufferIndex = mCodec.dequeueInputBuffer(-1);
                    if (inputBufferIndex >= 0) {
                        ByteBuffer inputBuffer;
                        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
                            inputBuffer = mCodec.getInputBuffer(inputBufferIndex);
                        } else {
                            inputBuffer = inputBuffers[inputBufferIndex];
                        }
                        if (inputBuffer == null) {
                            // break (not return) so the finally block still releases resources.
                            break;
                        }
                        // Copy the next encoded sample into the codec's input buffer.
                        int sampleSize = audioExtractor.readSampleData(inputBuffer, 0);
                        if (sampleSize < 0) {
                            // End of stream: signal EOS and stop feeding input.
                            mCodec.queueInputBuffer(inputBufferIndex, 0, 0, 0,
                                    MediaCodec.BUFFER_FLAG_END_OF_STREAM);
                            audioExtractor.unselectTrack(audioExtractorTrackIndex);
                            break;
                        } else {
                            mCodec.queueInputBuffer(inputBufferIndex, 0, sampleSize,
                                    audioExtractor.getSampleTime(), 0);
                            audioExtractor.advance();
                        }
                    }
                    int outputBufferIndex = mCodec.dequeueOutputBuffer(audioBufferInfo, TIMEOUT_USEC);
                    while (outputBufferIndex >= 0) {
                        decodeDelay(audioBufferInfo, startTimeStamp); // pace playback to the sample's PTS
                        ByteBuffer outputBuffer;
                        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
                            outputBuffer = mCodec.getOutputBuffer(outputBufferIndex);
                        } else {
                            // Cached array path for pre-Lollipop devices; on 6.0+
                            // this path reportedly crashed on some models.
                            outputBuffer = outputBuffers[outputBufferIndex];
                        }
                        // Copy the decoded PCM out of the codec buffer.
                        byte[] outData = new byte[audioBufferInfo.size];
                        outputBuffer.get(outData);
                        outputBuffer.clear();
                        if (audioTrack != null){
                            // Blocking write feeds the PCM to the audio hardware.
                            audioTrack.write(outData, 0, outData.length);
                        }
                        // render == false: audio buffers are never rendered to a surface.
                        mCodec.releaseOutputBuffer(outputBufferIndex, false);
                        outputBufferIndex = mCodec.dequeueOutputBuffer(audioBufferInfo, TIMEOUT_USEC);
                    }
                }
            } catch (IOException e){
                e.printStackTrace();
            } finally {
                // Release everything even when setDataSource or decoding throws.
                if (audioTrack != null){
                    audioTrack.stop();
                    audioTrack.release();
                }
                audioExtractor.release();
                stopDecodeSync();
            }
        }
    });
    audioDecodeThread.start();
}
复制代码
音视频同步:
/**
 * Sleeps until the buffer's presentation time catches up with the wall clock,
 * pacing decode output against an external-clock baseline.
 *
 * @param bufferInfo  codec buffer metadata; its presentationTimeUs (µs) is the
 *                    target playback time relative to the start of the stream
 * @param startMillis wall-clock time (ms) at which decoding started
 */
private void decodeDelay(MediaCodec.BufferInfo bufferInfo, long startMillis) {
    // How far ahead of the wall clock this buffer's PTS is, in milliseconds.
    long aheadMs = bufferInfo.presentationTimeUs / 1000 - (System.currentTimeMillis() - startMillis);
    if (aheadMs > 0) {
        try {
            Thread.sleep(aheadMs);
        } catch (InterruptedException e) {
            // Restore the interrupt status so the decode loop can observe cancellation
            // (the old code swallowed it with printStackTrace only).
            Thread.currentThread().interrupt();
        }
    }
}
复制代码
上面的代码只是简单地获取本地视频文件,分别将视频帧解码显示和音频帧解码播放出来,还存在同步问题。同步无非就是,当视频帧播放快了,则等待音频帧播放完或者加快、丢弃音频帧,当音频播放快了,则判断是否需要加快视频帧的播放。这里不同的同步方式,产生了几种不同的同步策略,分别是视频同步音频、音频同步视频、以外部时钟作为同步基准等等。这些需要考虑到的地方太多了,所以如果想做一个音视频项目,而不是项目中的一个小功能,最好还是选用市面上比较成熟的方案,比如ijkplayer,ffmpeg等。