Here is a record of how this was actually used in a previous project.
The walkthrough goes Java layer ------> JNI interface ------> FFmpeg decoding in the JNI code.
First, the Java layer:
public abstract class CodecWrapper {
//Load the codec JNI library and the FFmpeg shared libraries
static {
System.loadLibrary("codec");
System.loadLibrary("avutil-55");
System.loadLibrary("swresample-2");
System.loadLibrary("avcodec-57");
System.loadLibrary("avformat-57");
System.loadLibrary("swscale-4");
System.loadLibrary("postproc-54");
System.loadLibrary("avfilter-6");
System.loadLibrary("avdevice-57");
}
//Initialize the FFmpeg decoder part; returns a handle to the native decoder
public native long get_codec();
//Pass in data to decode. This is called for every incoming frame of data.
//surface is where the decoded data ends up being displayed; it is passed into JNI as an object.
public native void decode_stream(byte[] frame, int length, long decoder, Surface surface);
//Decoding is finished; release the native resources
public native void release_codec(long decoder);
//After decoding, the image data is written into the Surface's buffer to be displayed. The Surface is obtained from a SurfaceView.
private Surface mSurface;
private long mDecoderHandle;
public CodecWrapper(Surface surface){
mSurface = surface;
init();
}
public void init(){
mDecoderHandle = get_codec();
}
public void decodeStream(byte[] frame, int length){
decode_stream(frame, length, mDecoderHandle, mSurface);
}
public void release(){
release_codec(mDecoderHandle);
}
@Override
protected void finalize() throws Throwable {
try {
release();
} finally {
try {
super.finalize();
} catch (Throwable e) {
e.printStackTrace();
}
}
}
//This method is called from JNI once a frame has been decoded successfully
/*public void onFrameDecode(byte[] data, int width, int height){
getOneFrame(data, width, height);
}
abstract void getOneFrame(byte[] data, int width, int height);*/
public void onFrameDecode(int[] data, int width, int height){
getOneFrame(data, width, height);
}
abstract void getOneFrame(int[] data, int width, int height);
}
**First,** the CodecWrapper constructor is called with a Surface object; it then calls the native get_codec method to initialize the FFmpeg decoder.
**Then,** for every incoming frame of data, decode_stream is called to do the actual decoding.
**When decoding is finished,** release_codec is called to free the JNI-layer resources and avoid memory leaks.
Next, part of the JNI interface code:
com_xxx_xxx_CodecWrapper.h
#include <jni.h>
/*
 * Method: initialize the decoder, returns a handle to it
 */
JNIEXPORT jlong JNICALL Java_com_xxx_CodecWrapper_get_1codec
(JNIEnv *, jobject);
/*
 * Method: decode
 * Signature: ([BIJLandroid/view/Surface;)V
 */
JNIEXPORT void JNICALL Java_com_xxx_CodecWrapper_decode_1stream
(JNIEnv *, jobject, jbyteArray, jint, jlong, jobject);
/*
 * Method: release resources
 * Signature: (J)V
 */
JNIEXPORT void JNICALL Java_com_xxx_CodecWrapper_release_1codec
(JNIEnv *, jobject, jlong);
com_xxx_xxx_CodecWrapper.cpp
#include <jni.h>
#include <android/native_window.h>
#include <android/native_window_jni.h>
#include "../decoder.h"
#include "../yuv_2_rgb.h"
#include "../android/android_native_window.h"
#include "../timeutil.h"
enum AVPixelFormat pixelFormat = AV_PIX_FMT_RGB565LE;
int native_pix_format = PIXEL_FORMAT_RGB_565;
//A wrapper around the current JVM environment: the JNIEnv, the Java object that invoked this JNI method, and the Surface object
typedef struct _EnvPackage{
JNIEnv *env;
jobject *obj;
jobject *surface;
} EnvPackage;
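//Note: the code in this article renders straight to the Surface and never calls back into Java,
//but the Java class also declares onFrameDecode(int[], int, int) as the "called by jni" hook.
//A minimal sketch of such a callback built on EnvPackage (a hypothetical helper, not part of the original project):
static void notify_frame_decoded(EnvPackage *pkg, const jint *pixels, int width, int height) {
JNIEnv *env = pkg->env;
jclass clazz = env->GetObjectClass(*(pkg->obj));
//Matches the Java side: public void onFrameDecode(int[] data, int width, int height)
jmethodID mid = env->GetMethodID(clazz, "onFrameDecode", "([III)V");
if (mid != NULL) {
jintArray jdata = env->NewIntArray(width * height);
env->SetIntArrayRegion(jdata, 0, width * height, pixels);
env->CallVoidMethod(*(pkg->obj), mid, jdata, width, height);
env->DeleteLocalRef(jdata);
}
env->DeleteLocalRef(clazz);
}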
JNIEXPORT jlong JNICALL Java_com_xxxx_CodecWrapper_get_1codec
(JNIEnv *env, jobject obj){
decoder *p = new decoder();
p->initialize(pixelFormat);
//Return the native decoder pointer to Java as a long handle
return (jlong) p;
}
//handle_data is defined further below; declared here so decode_stream can pass it as a callback
void handle_data(AVFrame *pFrame, void *param, void *ctx, decoder *this_obj);
JNIEXPORT void JNICALL Java_com_xxxx_CodecWrapper_decode_1stream(JNIEnv *env, jobject obj, jbyteArray jdata, jint length, jlong this_obj_long, jobject surface){
decoder *this_obj = (decoder *) this_obj_long;
//Convert the Java byte array into a jbyte pointer usable from JNI
jbyte *cdata = env->GetByteArrayElements(jdata, NULL);
jbyte *cdata_rec = cdata;
if(cdata != NULL) {
EnvPackage package;
package.env = env;
package.obj = &obj;
package.surface = &surface;
//Decode and display
this_obj->decodeFrame((const char *) cdata, length, handle_data, &package, this_obj);
}else{
LOGE("stream data is NULL");
}
//Release the array elements
env->ReleaseByteArrayElements(jdata, cdata_rec, 0);
}
JNIEXPORT void JNICALL Java_com_xxxx_CodecWrapper_release_1codec(JNIEnv *env, jobject obj, jlong this_obj_long){
decoder *this_obj = (decoder *) this_obj_long;
this_obj->close();
delete this_obj;
}
//Used for computing the decode frame rate
int frame_count = 0;
//Called after FFmpeg has decoded a frame into YUV420P data
//pFrame here holds that YUV420P data
void handle_data(AVFrame *pFrame, void *param, void *ctx,decoder *this_obj){
RenderParam *renderParam = (RenderParam *)param;
//For display, the frame still has to be converted to an RGB format (RGB565LE in this project)
AVFrame *rgbFrame = yuv420p_2_argb(pFrame, renderParam->swsContext, renderParam->avCodecContext, pixelFormat);//AV_PIX_FMT_RGB565LE
if (this_obj->aNativeWindow == NULL){
EnvPackage *envPackage = (EnvPackage *)ctx;
//ANativeWindow_fromSurface converts the Surface passed down from the Java layer into a native window object
this_obj->aNativeWindow = ANativeWindow_fromSurface(envPackage->env, *(envPackage->surface));
}
VoutInfo voutInfo;
voutInfo.buffer = rgbFrame->data[0];
voutInfo.buffer_width = rgbFrame->width;
voutInfo.buffer_height = rgbFrame->height;
voutInfo.pix_format = native_pix_format;
//Display the frame
android_native_window_display(this_obj->aNativeWindow, &voutInfo);
LOGE("display frame %d", frame_count++);
fps();//compute the frame rate
//The cached ANativeWindow is reused for the next frame and released later in decoder::close()
//Free the converted image data
av_free(rgbFrame->data[0]);
av_free(rgbFrame);
}
OK, that is the JNI interface layer; it is only a thin wrapper over the lower-level code. Next, let's see how the JNI layer actually decodes and displays:
decoder.h
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <iostream>
#include <pthread.h>
#include <android/native_window.h>
#include "../log.h"
extern "C" {
#include "ffmpeg/libavcodec/avcodec.h"
#include "ffmpeg/libavformat/avformat.h"
#include "ffmpeg/libswscale/swscale.h"
}
//Receives the H264 data passed down from the Java layer.
//The size is hard-coded: H264 frames vary in size, and 1024 * 600 was larger than any single frame in that project; a smaller buffer would cause a memory-overflow error.
#define INBUF_SIZE (1024 * 600)
//#define INBUF_SIZE 8192
typedef struct _RenderParam{
SwsContext *swsContext;
AVCodecContext *avCodecContext;
}RenderParam;
//Holds H264 NALU header information
typedef struct _NalInfo{
uint8_t forbidden_zero_bit;
uint8_t nal_ref_idc;
uint8_t nal_unit_type;
} NalInfo;
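//For reference, the single NAL unit header byte that follows the start code is laid out as
//forbidden_zero_bit (1 bit) | nal_ref_idc (2 bits) | nal_unit_type (5 bits),
//so all three NalInfo fields could be filled from that one byte, e.g. (sketch, not in the original code):
//nalInfo->forbidden_zero_bit = (b >> 7) & 0x01;
//nalInfo->nal_ref_idc = (b >> 5) & 0x03;
//nalInfo->nal_unit_type = b & 0x1f;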
//The decoder class
class decoder {
private:
int frame_count;//number of frames decoded and displayed successfully
AVFrame *frame;
uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
AVPacket avpkt;//holds the data to be decoded
AVCodecParserContext *parser;//parses individual H264 frames out of the incoming data; one buffer may contain several H264 units, which must be extracted one by one
AVCodec *codec;//the decoder, also used to create the AVCodecContext
AVCodecContext *codecContext;//codec context used for decoding
SwsContext *img_convert_ctx;//performs pixel-format conversion and scaling
enum AVPixelFormat pixelFormat;//target pixel format
//rgb frame cache
AVFrame *pFrameRGB;//caches the last converted RGB frame
public:
// pthread_mutex_t mutex1;
RenderParam *renderParam = NULL;
ANativeWindow *aNativeWindow;//native window object converted from the Java Surface
decoder();
void initialize(enum AVPixelFormat format);//initialization
//the decode method
int decodeFrame(const char *data, int length, void (*handle_data)(AVFrame *pFrame, void *param, void *ctx,decoder *object), void *ctx,decoder *object);
void close();
void setFrameRGB(AVFrame *frame);
int handleH264Header(uint8_t* ptr, NalInfo *nalInfo);
AVFrame *getFrameRGB();
};
decoder.cpp
#include "decoder.h"
#include "timeutil.h"
//The decoder constructor; the member variables are initialized here.
decoder::decoder() :codec(NULL), codecContext(NULL), frame_count(0), frame(NULL), parser(NULL), img_convert_ctx(NULL), pFrameRGB(NULL), aNativeWindow(NULL) {
}
//Initialization
void decoder::initialize(enum AVPixelFormat format) {
/* register all the codecs: registers decoders, encoders, parsers, hardware accelerators, etc. Decoders can only be used after this call; it is the first call in almost every FFmpeg program. */
avcodec_register_all();
//Initialize the AVPacket that will hold the data to be decoded
av_init_packet(&avpkt);
//Allocate memory for the RenderParam object
renderParam = (RenderParam *)malloc(sizeof(RenderParam));
/* find the H264 video decoder */
codec = avcodec_find_decoder(AV_CODEC_ID_H264);
if (!codec) {
fprintf(stderr, "Codec not found\n");
exit(1);
}
//Allocate the AVCodecContext (the per-stream codec state) from the AVCodec
codecContext = avcodec_alloc_context3(codec);
if (!codecContext) {
fprintf(stderr, "Could not allocate video codec context\n");
exit(1);
}
/* we do not send complete frames */
if (codec->capabilities & CODEC_CAP_TRUNCATED)
codecContext->flags |= CODEC_FLAG_TRUNCATED;
/* open it: initialize the AVCodecContext from the AVCodec and open the decoder */
if (avcodec_open2(codecContext, codec, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
exit(1);
}
//Allocate the AVFrame
frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
//Initialize the H264 parser
parser = av_parser_init(AV_CODEC_ID_H264);
if (!parser) {
std::cout << "cannot create parser" << std::endl;
exit(1);
}
printf(" decoder init ..........\n");
frame_count = 0;
//The SwsContext is created lazily when the first frame is decoded
img_convert_ctx =NULL;
//The cached RGB frame starts out empty
pFrameRGB = NULL;
//pixelFormat = AV_PIX_FMT_BGRA;
//pixelFormat = AV_PIX_FMT_RGB565LE;
//pixelFormat = AV_PIX_FMT_BGR24;
pixelFormat = format;
}
//A lock protecting the decode path
static pthread_mutex_t buffer_lock = PTHREAD_MUTEX_INITIALIZER;
int decoder::decodeFrame(const char *data, int length, void (*handle_data)(AVFrame *pFrame, void *param, void *ctx,decoder *this_obj), void *ctx,decoder *this_obj) {
pthread_mutex_lock (&buffer_lock);
int cur_size = length;
int ret = 0;
LOGI("decodeFrame 1 ,length is %d: ",length);
//Zero inbuf from 0 to length; length is the size of the incoming H264 data
memset(inbuf , 0, length);
//Zero inbuf from length to length + FF_INPUT_BUFFER_PADDING_SIZE;
//FF_INPUT_BUFFER_PADDING_SIZE is needed because FFmpeg's readers may read slightly past the end of the input buffer
memset(inbuf + length, 0, FF_INPUT_BUFFER_PADDING_SIZE);
//Copy data into inbuf
memcpy(inbuf, data, length);
data = NULL;
//Point at the start of inbuf
const uint8_t *cur_ptr = inbuf;
// Parse input stream to check if there is a valid frame.
//Loop over the buffer
while(cur_size >0)
{
//Parse one frame out of the current H264 data; parsedLength is the number of bytes consumed
int parsedLength = av_parser_parse2(parser, codecContext, &avpkt.data,
&avpkt.size, (const uint8_t*)cur_ptr , cur_size, AV_NOPTS_VALUE,
AV_NOPTS_VALUE, AV_NOPTS_VALUE);
cur_ptr += parsedLength;//advance past the bytes the parser consumed
cur_size -= parsedLength;//bytes of H264 data still left to parse
LOGI("decodeFrame 3-1 cur_size == %d parsedLength == %d,avpkt.size=%d", cur_size, parsedLength,avpkt.size);
// 67 sps
// 68 pps
// 65 i
// 61 p
//LOGE("parsedLength %d %x %x %x %x %x %x %x %x", parsedLength, cur_ptr[0], cur_ptr[1], cur_ptr[2], cur_ptr[3], cur_ptr[4], cur_ptr[5], cur_ptr[6], cur_ptr[7]);
//LOGE("parsedLength %d %x %x %x %x %x %x %x %x", parsedLength, *(cur_ptr-parsedLength), *(cur_ptr-parsedLength+1), *(cur_ptr-parsedLength+2), *(cur_ptr-parsedLength+3), *(cur_ptr-parsedLength+4), *(cur_ptr-parsedLength+5), *(cur_ptr-parsedLength+6), *(cur_ptr-parsedLength+7));
NalInfo nalInfo;
//Parse the H264 NALU header (the result is only logged here)
ret = handleH264Header(cur_ptr-parsedLength, &nalInfo);
if (!avpkt.size) {//the parser has not produced a complete packet yet
LOGI("avpkt.size = %d,continue",avpkt.size);
continue;
} else {
int len;//number of input bytes the decoder consumed
int got_frame;//zero means no picture was produced, non-zero means a decoded picture is available
//codecContext: codec context describing how to decode
//frame: output parameter, the decoded picture
//got_frame: output flag, see above
//avpkt: input parameter holding the H264 data to decode
len = avcodec_decode_video2(codecContext, frame, &got_frame, &avpkt);
LOGI("decodeFrame 4, decode len is %d,got_frame = %d", len, got_frame);
if (len < 0) {
fprintf(stderr, "Error while decoding frame %d\n", frame_count);
continue;
}
if (got_frame) {//a picture was decoded successfully
frame_count++;
LOGE("frame %d", frame_count);
if (img_convert_ctx == NULL) {
//Create the SwsContext used to convert and scale the decoded frames
img_convert_ctx = sws_getContext(codecContext->width, codecContext->height,
codecContext->pix_fmt, codecContext->width,
codecContext->height,
pixelFormat, SWS_BICUBIC, NULL, NULL,
NULL);
//Fill in the render parameters
renderParam->swsContext = img_convert_ctx;
renderParam->avCodecContext = codecContext;
}
if (img_convert_ctx != NULL) {
//This is the handle_data function in com_xxx_xxx_CodecWrapper.cpp
handle_data(frame, renderParam, ctx, this_obj);
}
}
}
}
//Unlock
pthread_mutex_unlock (&buffer_lock);
return length;
}
//Release resources once decoding is finished
void decoder::close() {
av_free_packet(&avpkt);
avpkt.data = NULL;
avpkt.size = 0;
if (parser) {
av_parser_close(parser);
parser = NULL;
}
if(renderParam){
free(renderParam);
renderParam = NULL;
}
if(pFrameRGB){
av_frame_free(&pFrameRGB);//allocated with av_frame_alloc, so release with av_frame_free rather than delete
pFrameRGB = NULL;
}
avcodec_close(codecContext);
av_free(codecContext);
av_frame_free(&frame);
if(img_convert_ctx!=NULL)
{
sws_freeContext(img_convert_ctx);
img_convert_ctx = NULL;
}
if (aNativeWindow != NULL){//release the cached window object
ANativeWindow_release(aNativeWindow);
aNativeWindow = NULL;
}
printf(" decoder close ..........\n");
}
AVFrame * decoder::getFrameRGB() {
return pFrameRGB;
}
//Parse the NALU header of an H264 frame
int decoder::handleH264Header(uint8_t* ptr, NalInfo *nalInfo){
LOGI("handleH264Header 1 ");
int startIndex = 0;
uint32_t *checkPtr = (uint32_t *)ptr;
if(*checkPtr == 0x01000000){ // 00 00 00 01
startIndex = 4;
}else if(*(checkPtr) == 0 && (*(checkPtr+1) & 0xFF) == 0x01){ // 00 00 00 00 01 (check the fifth byte)
startIndex = 5;
}
if(!startIndex){//no H264 start code found
return -1;
}else{
ptr = ptr + startIndex;
nalInfo->nal_unit_type = 0x1f & *ptr;
if(nalInfo->nal_unit_type == 5 || nalInfo->nal_unit_type == 7 || nalInfo->nal_unit_type == 8 || nalInfo->nal_unit_type == 2){ //5 = IDR slice, 7 = SPS, 8 = PPS; treated as key-frame data here
LOGE("I frame");
}else if(nalInfo->nal_unit_type == 1){
LOGE("P frame");
}
}
LOGI("handleH264Header 2 ")
return 0;
}
void decoder::setFrameRGB(AVFrame *frame) {
pFrameRGB = frame;
}
yuv_2_rgb.cpp
//avcodec_decode_video2 produces YUV420P data; this converts it to the requested RGB format (despite the _argb name, the format passed in this project is RGB565LE)
AVFrame *yuv420p_2_argb(AVFrame *frame, SwsContext *swsContext, AVCodecContext *avCodecContext, enum AVPixelFormat format){
AVFrame *pFrameRGB = NULL;
uint8_t *out_bufferRGB = NULL;
//Allocate the frame that will hold the RGB data
pFrameRGB = av_frame_alloc();
pFrameRGB->width = frame->width;
pFrameRGB->height = frame->height;
int size = avpicture_get_size(format, avCodecContext->width, avCodecContext->height);
out_bufferRGB = (uint8_t *) av_malloc(size * sizeof(uint8_t));
//Attach the allocated buffer to pFrameRGB //AV_PIX_FMT_ARGB
avpicture_fill((AVPicture *)pFrameRGB, out_bufferRGB, format, avCodecContext->width, avCodecContext->height);
//YUV to RGB
sws_scale(swsContext, frame->data, frame->linesize, 0, avCodecContext->height, pFrameRGB->data, pFrameRGB->linesize);
return pFrameRGB;
}
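//Memory note: each call to yuv420p_2_argb allocates a fresh AVFrame and pixel buffer; the caller
//owns both and must free them, which is what handle_data does with av_free(rgbFrame->data[0]) and av_free(rgbFrame).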
android_native_window.h
#include <stdint.h>
#include <android/native_window.h>
enum {
PIXEL_FORMAT_RGBA_8888 = 1,
PIXEL_FORMAT_RGBX_8888 = 2,
PIXEL_FORMAT_RGB_565 = 3
};
typedef struct _VoutInfo{
/**
WINDOW_FORMAT_RGBA_8888 = 1,
WINDOW_FORMAT_RGBX_8888 = 2,
WINDOW_FORMAT_RGB_565 = 4,*/
uint32_t pix_format;
uint32_t buffer_width;
uint32_t buffer_height;
uint8_t *buffer;
} VoutInfo;
typedef struct _VoutRender{
uint32_t pix_format;
uint32_t window_format;
//Render into the window buffer
void (*render)(ANativeWindow_Buffer *nwBuffer, VoutInfo *voutInfo);
}VoutRender;
//Display
void android_native_window_display(ANativeWindow *aNativeWindow, VoutInfo *voutInfo);
android_native_window.cpp
#include "android_native_window.h"
//This is the last step of decode-and-render; anything that ends up on screen goes through here.
//Once a frame is decoded, the RGB frame has to be rendered onto the canvas. Following ijkplayer's approach, the Surface is passed into the JNI layer as a parameter,
//its buffer is obtained, and the generated RGB data is copied straight into that buffer to complete the display.
void render_on_rgb(ANativeWindow_Buffer *nwBuffer, VoutInfo *voutInfo, int bpp){
int stride = nwBuffer->stride;
int dst_width = nwBuffer->width;
int dst_height = nwBuffer->height;
LOGE("ANativeWindow stride %d width %d height %d", stride, dst_width, dst_height);
//Source row size comes from the tightly packed RGB frame; the destination row size uses the
//window's stride (pixels per row, possibly padded beyond the visible width).
int src_line_size = voutInfo->buffer_width * bpp / 8;
int dst_line_size = stride * bpp / 8;
int min_height = dst_height < voutInfo->buffer_height ? dst_height : voutInfo->buffer_height;
if(src_line_size == dst_line_size) {
//When the row sizes match, a single memcpy copies the whole frame into the surface buffer to display it
memcpy((__uint8_t *) nwBuffer->bits, (__uint8_t *) voutInfo->buffer, src_line_size * min_height);
}else{
//Row-by-row copy when the window stride does not match the frame width.
//Manual version (equivalent to the FFmpeg helper below):
/*for(int i = 0; i < min_height; i++){
memcpy((__uint8_t *) nwBuffer->bits + dst_line_size * i, (__uint8_t *) voutInfo->buffer + src_line_size * i, src_line_size);
}*/
//Use FFmpeg's helper to do the same thing
av_image_copy_plane((uint8_t *) nwBuffer->bits, dst_line_size, voutInfo->buffer, src_line_size, src_line_size, min_height);
}
}
void render_on_rgb8888(ANativeWindow_Buffer *nwBuffer, VoutInfo *voutInfo){
render_on_rgb(nwBuffer, voutInfo, 32);
}
void render_on_rgb565(ANativeWindow_Buffer *nwBuffer, VoutInfo *voutInfo){
render_on_rgb(nwBuffer, voutInfo, 16);
}
static VoutRender g_pixformat_map[] = {
{PIXEL_FORMAT_RGBA_8888, WINDOW_FORMAT_RGBA_8888,render_on_rgb8888},
{PIXEL_FORMAT_RGBX_8888, WINDOW_FORMAT_RGBX_8888, render_on_rgb8888},
{PIXEL_FORMAT_RGB_565, WINDOW_FORMAT_RGB_565, render_on_rgb565}
};
VoutRender *get_render_by_window_format(int window_format){
int len = sizeof(g_pixformat_map) / sizeof(g_pixformat_map[0]);
for(int i=0; i<len; i++){
if(g_pixformat_map[i].window_format == window_format){
return &g_pixformat_map[i];
}
}
//No renderer matches this window format
return NULL;
}
void android_native_window_display(ANativeWindow *aNativeWindow, VoutInfo *voutInfo){
//The ANativeWindow object was created earlier with ANativeWindow_fromSurface and is cached by the decoder
if (aNativeWindow == NULL) {
LOGE("aNativeWindow is NULL");
return;
}
int curr_format = ANativeWindow_getFormat(aNativeWindow);
//Pick a renderer that matches the window's pixel format
VoutRender *render = get_render_by_window_format(curr_format);
if (render == NULL) {
LOGE("unsupported window format %d", curr_format);
return;
}
ANativeWindow_Buffer nwBuffer;
//scaled buffer to fit window: scale the rgb frame to the window so it fits the display size
int retval = ANativeWindow_setBuffersGeometry(aNativeWindow, voutInfo->buffer_width, voutInfo->buffer_height, render->window_format);
if (retval < 0) {
LOGE("ANativeWindow_setBuffersGeometry: error %d", retval);
return;
}
//Lock the surface
if (0 != ANativeWindow_lock(aNativeWindow, &nwBuffer, 0)) {
LOGE("ANativeWindow_lock error");
return;
}
//Render with the renderer chosen above
render->render(&nwBuffer, voutInfo);
//Unlock the surface and post the new buffer for display
if(0 !=ANativeWindow_unlockAndPost(aNativeWindow)){
LOGE("ANativeWindow_unlockAndPost error");
return;
}
//The ANativeWindow reference is owned by the decoder and released in decoder::close(), so it is not released here
}