(A brief walkthrough of the flow. Documenting every API parameter here would take too much space, so please look up anything that is unclear.)
MediaExtractor extractor = new MediaExtractor();
extractor.setDataSource(videoFilePath);
private static int selectVideoTrack(MediaExtractor extractor) {
int numTracks = extractor.getTrackCount();
for (int i = 0; i < numTracks; i++) {
MediaFormat format = extractor.getTrackFormat(i);
String mime = format.getString(MediaFormat.KEY_MIME);
if (mime.startsWith("video/")) {
// for an audio track the MIME type starts with "audio/"
extractor.selectTrack(i);
return i;
}
}
return -1;
}
int trackIndex = selectVideoTrack(extractor);
MediaFormat mediaFormat = extractor.getTrackFormat(trackIndex);
int width = mediaFormat.getInteger(MediaFormat.KEY_WIDTH);
int height = mediaFormat.getInteger(MediaFormat.KEY_HEIGHT);
Log.d(TAG, "decode video width: " + width + ", height: " + height);
String mime = mediaFormat.getString(MediaFormat.KEY_MIME);
MediaCodec decoder = MediaCodec.createDecoderByType(mime);
mediaFormat.setInteger(MediaFormat.KEY_COLOR_FORMAT, MediaCodecInfo.CodecCapabilities.COLOR_FormatYUV420Flexible); // request flexible YUV420 output (real code should check codec support first; see the full class below)
// The 2nd parameter is a Surface; if set, decoded frames are rendered directly to it. We don't need that here, so pass null
decoder.configure(mediaFormat, null, null, 0);
// start the decoder
decoder.start();
// Step 1: feed one frame of input data to the decoder
// 1. Dequeue an available input buffer, waiting at most DEFAULT_TIMEOUT_US
//    (a negative id means no buffer is free yet; the full class below handles that case)
int inputBufferId = decoder.dequeueInputBuffer(DEFAULT_TIMEOUT_US);
ByteBuffer inputBuffer = decoder.getInputBuffer(inputBufferId);
// 2. Read one frame of compressed data into the buffer
int sampleSize = extractor.readSampleData(inputBuffer, 0);
// 3. Queue the buffer for decoding; the decoder will process the queued data asynchronously
decoder.queueInputBuffer(inputBufferId, 0, sampleSize, extractor.getSampleTime(), 0);
// 4. Advance the extractor to the next sample
extractor.advance();
// Step 2: take one decoded frame out of the decoder
// 1. Dequeue an available output buffer, waiting at most DEFAULT_TIMEOUT_US
MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
int outputBufferId = decoder.dequeueOutputBuffer(info, DEFAULT_TIMEOUT_US);
// 2. Get the decoded Image object; the YUV data lives inside it
Image image = decoder.getOutputImage(outputBufferId);
getDataFromImage(image, mOutputFormat, width, height); // custom method, explained below
image.close();
// 3. Release the output buffer when done
decoder.releaseOutputBuffer(outputBufferId, false);
decoder.stop();
decoder.release();
extractor.release();
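In real code these two steps run in a loop until the end of the stream, with checks for negative buffer ids and the EOS flag; the complete class further below does exactly that.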
What we usually want is raw YUV data. Common YUV layouts include NV12, NV21 and I420, and we can parse the decoder output into whichever layout has been configured (a small conversion sketch follows the function below).
public final static int COLOR_FORMAT_I420 = 1; // output I420
public final static int COLOR_FORMAT_NV21 = 2; // output NV21
public final static int COLOR_FORMAT_NV12 = 3; // output NV12
private byte[] mYuvBuffer;
private void getDataFromImage(Image image, int colorFormat, int width, int height) {
Rect crop = image.getCropRect();
Log.d(TAG, "crop width: " + crop.width() + ", height: " + crop.height());
Image.Plane[] planes = image.getPlanes();
byte[] rowData = new byte[planes[0].getRowStride()];
int channelOffset = 0;
int outputStride = 1;
for (int i = 0; i < planes.length; i++) {
switch (i) {
case 0: // plane 0: Y
channelOffset = 0;
outputStride = 1;
break;
case 1: // plane 1: U
if (colorFormat == COLOR_FORMAT_I420) {
channelOffset = width * height;
outputStride = 1;
} else if (colorFormat == COLOR_FORMAT_NV21) {
channelOffset = width * height + 1;
outputStride = 2;
} else if (colorFormat == COLOR_FORMAT_NV12) {
channelOffset = width * height;
outputStride = 2;
}
break;
case 2: // plane 2: V
if (colorFormat == COLOR_FORMAT_I420) {
channelOffset = (int) (width * height * 1.25);
outputStride = 1;
} else if (colorFormat == COLOR_FORMAT_NV21) {
channelOffset = width * height;
outputStride = 2;
} else if (colorFormat == COLOR_FORMAT_NV12) {
channelOffset = width * height + 1;
outputStride = 2;
}
break;
default:
}
ByteBuffer buffer = planes[i].getBuffer();
int rowStride = planes[i].getRowStride();
int pixelStride = planes[i].getPixelStride();
int shift = (i == 0) ? 0 : 1; // chroma planes are subsampled 2x in both dimensions (4:2:0)
int w = width >> shift;
int h = height >> shift;
buffer.position(rowStride * (crop.top >> shift) + pixelStride * (crop.left >> shift));
for (int row = 0; row < h; row++) {
int length;
if (pixelStride == 1 && outputStride == 1) {
length = w;
buffer.get(mYuvBuffer, channelOffset, length);
channelOffset += length;
} else {
length = (w - 1) * pixelStride + 1;
buffer.get(rowData, 0, length);
for (int col = 0; col < w; col++) {
mYuvBuffer[channelOffset] = rowData[col * pixelStride];
channelOffset += outputStride;
}
}
if (row < h - 1) {
buffer.position(buffer.position() + rowStride - length);
}
}
}
}
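For reference, the three layouts differ only in how the chroma samples are arranged after the Y plane: I420 stores a full U plane followed by a full V plane, NV12 interleaves the chroma as UVUV..., and NV21 interleaves it as VUVU.... NV12 and NV21 can therefore be converted into each other just by swapping each chroma byte pair. A minimal sketch of that swap (my own addition, assuming a packed width*height*3/2 buffer like mYuvBuffer):

// Convert NV12 (Y + interleaved UV) into NV21 (Y + interleaved VU) in place,
// or vice versa, by swapping every chroma byte pair after the Y plane.
public static void swapNv12Nv21(byte[] yuv, int width, int height) {
    int ySize = width * height;
    for (int i = ySize; i + 1 < yuv.length; i += 2) {
        byte tmp = yuv[i];
        yuv[i] = yuv[i + 1];
        yuv[i + 1] = tmp;
    }
}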
The complete class below adds basic return-value checks, a callback interface, and a way to stop decoding partway through.
Note that decode() blocks the calling thread, so never call it on the main thread.
import android.graphics.Rect;
import android.media.Image;
import android.media.MediaCodec;
import android.media.MediaCodecInfo;
import android.media.MediaExtractor;
import android.media.MediaFormat;
import android.util.Log;
import java.io.IOException;
import java.nio.ByteBuffer;
public class VideoDecoder {
private static final String TAG = "VideoDecoder";
public final static int COLOR_FORMAT_I420 = 1;
public final static int COLOR_FORMAT_NV21 = 2;
public final static int COLOR_FORMAT_NV12 = 3;
private static final int DECODE_COLOR_FORMAT = MediaCodecInfo.CodecCapabilities.COLOR_FormatYUV420Flexible;
private static final long DEFAULT_TIMEOUT_US = 10000;
private int mOutputFormat = COLOR_FORMAT_NV12;
private byte[] mYuvBuffer;
private volatile boolean mStop = false;
public void setOutputFormat(int outputFormat) {
mOutputFormat = outputFormat;
}
public int getOutputFormat() {
return mOutputFormat;
}
public void stop() {
mStop = true;
}
public void decode(String videoFilePath, DecodeCallback decodeCallback) {
mStop = false;
MediaExtractor extractor = null;
MediaCodec decoder = null;
try {
extractor = new MediaExtractor();
extractor.setDataSource(videoFilePath);
int trackIndex = selectVideoTrack(extractor);
if (trackIndex < 0) {
Log.e(TAG, "No video track found in " + videoFilePath);
return;
}
MediaFormat mediaFormat = extractor.getTrackFormat(trackIndex);
String mime = mediaFormat.getString(MediaFormat.KEY_MIME);
decoder = MediaCodec.createDecoderByType(mime);
if (isColorFormatSupported(DECODE_COLOR_FORMAT, decoder.getCodecInfo().getCapabilitiesForType(mime))) {
mediaFormat.setInteger(MediaFormat.KEY_COLOR_FORMAT, DECODE_COLOR_FORMAT);
Log.i(TAG, "set decode color format to type " + DECODE_COLOR_FORMAT);
} else {
Log.i(TAG, "unable to set decode color format, color format type " + DECODE_COLOR_FORMAT + " not " +
"supported");
}
int width = mediaFormat.getInteger(MediaFormat.KEY_WIDTH);
int height = mediaFormat.getInteger(MediaFormat.KEY_HEIGHT);
Log.d(TAG, "decode video width: " + width + ", height: " + height);
int yuvLength = width * height * 3 / 2;
if (mYuvBuffer == null || mYuvBuffer.length != yuvLength) {
mYuvBuffer = new byte[yuvLength];
}
decoder.configure(mediaFormat, null, null, 0);
decoder.start();
decodeFramesToImage(decoder, extractor, width, height, decodeCallback);
} catch (IOException e) {
e.printStackTrace();
} finally {
if (decoder != null) {
decoder.stop();
decoder.release();
decoder = null;
}
if (extractor != null) {
extractor.release();
extractor = null;
}
}
}
private static int selectVideoTrack(MediaExtractor extractor) {
int numTracks = extractor.getTrackCount();
for (int i = 0; i < numTracks; i++) {
MediaFormat format = extractor.getTrackFormat(i);
String mime = format.getString(MediaFormat.KEY_MIME);
if (mime.startsWith("video/")) {
extractor.selectTrack(i);
return i;
}
}
return -1;
}
private boolean isColorFormatSupported(int colorFormat, MediaCodecInfo.CodecCapabilities caps) {
for (int c : caps.colorFormats) {
if (c == colorFormat) {
return true;
}
}
return false;
}
private void decodeFramesToImage(MediaCodec decoder, MediaExtractor extractor, int width, int height,
DecodeCallback decodeCallback) {
MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
boolean sawInputEOS = false;
boolean sawOutputEOS = false;
int outputFrameCount = 0;
while (!mStop && !sawOutputEOS) {
if (!sawInputEOS) {
int inputBufferId = decoder.dequeueInputBuffer(DEFAULT_TIMEOUT_US);
if (inputBufferId >= 0) {
ByteBuffer inputBuffer = decoder.getInputBuffer(inputBufferId);
int sampleSize = extractor.readSampleData(inputBuffer, 0);
if (sampleSize < 0) {
decoder.queueInputBuffer(inputBufferId, 0, 0, 0L, MediaCodec.BUFFER_FLAG_END_OF_STREAM);
sawInputEOS = true;
Log.i(TAG, "sawInputEOS is true");
} else {
decoder.queueInputBuffer(inputBufferId, 0, sampleSize, extractor.getSampleTime(), 0);
extractor.advance();
}
}
}
int outputBufferId = decoder.dequeueOutputBuffer(info, DEFAULT_TIMEOUT_US);
if (outputBufferId >= 0) {
if ((info.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0) {
sawOutputEOS = true;
Log.i(TAG, "sawOutputEOS is true");
}
if (info.size > 0) {
outputFrameCount++;
Image image = decoder.getOutputImage(outputBufferId);
getDataFromImage(image, mOutputFormat, width, height);
image.close();
decoder.releaseOutputBuffer(outputBufferId, false);
// callback with the converted YUV frame
if (decodeCallback != null) {
decodeCallback.onDecode(mYuvBuffer, width, height, outputFrameCount, info.presentationTimeUs);
}
} else {
// release empty output buffers (e.g. the EOS-only buffer) as well
decoder.releaseOutputBuffer(outputBufferId, false);
}
}
}
if (decodeCallback != null) {
if (mStop) {
decodeCallback.onStop();
} else {
decodeCallback.onFinish();
}
}
}
private void getDataFromImage(Image image, int colorFormat, int width, int height) {
Rect crop = image.getCropRect();
Log.d(TAG, "crop width: " + crop.width() + ", height: " + crop.height());
Image.Plane[] planes = image.getPlanes();
byte[] rowData = new byte[planes[0].getRowStride()];
int channelOffset = 0;
int outputStride = 1;
for (int i = 0; i < planes.length; i++) {
switch (i) {
case 0: // plane 0: Y
channelOffset = 0;
outputStride = 1;
break;
case 1: // plane 1: U
if (colorFormat == COLOR_FORMAT_I420) {
channelOffset = width * height;
outputStride = 1;
} else if (colorFormat == COLOR_FORMAT_NV21) {
channelOffset = width * height + 1;
outputStride = 2;
} else if (colorFormat == COLOR_FORMAT_NV12) {
channelOffset = width * height;
outputStride = 2;
}
break;
case 2: // plane 2: V
if (colorFormat == COLOR_FORMAT_I420) {
channelOffset = (int) (width * height * 1.25);
outputStride = 1;
} else if (colorFormat == COLOR_FORMAT_NV21) {
channelOffset = width * height;
outputStride = 2;
} else if (colorFormat == COLOR_FORMAT_NV12) {
channelOffset = width * height + 1;
outputStride = 2;
}
break;
default:
}
ByteBuffer buffer = planes[i].getBuffer();
int rowStride = planes[i].getRowStride();
int pixelStride = planes[i].getPixelStride();
int shift = (i == 0) ? 0 : 1; // chroma planes are subsampled 2x in both dimensions (4:2:0)
int w = width >> shift;
int h = height >> shift;
buffer.position(rowStride * (crop.top >> shift) + pixelStride * (crop.left >> shift));
for (int row = 0; row < h; row++) {
int length;
if (pixelStride == 1 && outputStride == 1) {
length = w;
buffer.get(mYuvBuffer, channelOffset, length);
channelOffset += length;
} else {
length = (w - 1) * pixelStride + 1;
buffer.get(rowData, 0, length);
for (int col = 0; col < w; col++) {
mYuvBuffer[channelOffset] = rowData[col * pixelStride];
channelOffset += outputStride;
}
}
if (row < h - 1) {
buffer.position(buffer.position() + rowStride - length);
}
}
}
}
public interface DecodeCallback {
// the layout of the returned YUV data is the one set via setOutputFormat();
// note: the same buffer instance is reused for every frame, so copy it if you keep a reference
void onDecode(byte[] yuv, int width, int height, int frameCount, long presentationTimeUs);
// decoding finished normally
void onFinish();
// decoding was stopped early via stop()
void onStop();
}
}
VideoDecoder mVideoDecoder = new VideoDecoder();
mVideoDecoder.setOutputFormat(VideoDecoder.COLOR_FORMAT_NV12); // request NV12 output
// on some worker thread (decode() blocks)
mVideoDecoder.decode("/sdcard/test.mp4", new VideoDecoder.DecodeCallback() {
@Override
public void onDecode(byte[] yuv, int width, int height, int frameCount, long presentationTimeUs) {
Log.d(TAG, "frameCount: " + frameCount + ", presentationTimeUs: " + presentationTimeUs);
// consume the YUV data here, e.g. save it to a file or feed it to an encoder
}
@Override
public void onFinish() {
Log.d(TAG, "onFinish");
}
@Override
public void onStop() {
Log.d(TAG, "onStop");
}
});
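Since decode() blocks until the whole file has been processed, drive it from a dedicated thread. Below is a minimal sketch that dumps every decoded NV12 frame into a raw .yuv file; the output path and the stream handling are illustrative assumptions on my part (it also needs the java.io.FileOutputStream and java.io.IOException imports):

new Thread(() -> {
    try (FileOutputStream fos = new FileOutputStream("/sdcard/test.yuv")) {
        mVideoDecoder.decode("/sdcard/test.mp4", new VideoDecoder.DecodeCallback() {
            @Override
            public void onDecode(byte[] yuv, int width, int height, int frameCount, long presentationTimeUs) {
                try {
                    fos.write(yuv); // one packed width*height*3/2 NV12 frame per callback
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
            @Override
            public void onFinish() { Log.d(TAG, "onFinish"); }
            @Override
            public void onStop() { Log.d(TAG, "onStop"); }
        });
    } catch (IOException e) {
        e.printStackTrace();
    }
}).start();

The resulting raw file can then be checked with a tool like ffplay, e.g. ffplay -f rawvideo -pixel_format nv12 -video_size 1280x720 /sdcard/test.yuv, substituting the actual resolution.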