Project code: https://blog.csdn.net/al4fun/article/details/104293868
The following code shows how to decode an H264 video stream into raw YUV image files. Decoding is done with the two functions avcodec_send_packet and avcodec_receive_frame. To understand the yuv_save(AVFrame *avFrame, char *filename) function, you need to know how YUV420P is stored in an AVFrame: data[0] holds the Y plane, data[1] the U plane, and data[2] the V plane, and the number of bytes per row of Y, U, and V data is linesize[0], linesize[1], and linesize[2] respectively. Because of padding, linesize is not necessarily equal to the image width.
Java-layer call:
File src = new File(getCacheDir().getAbsolutePath(), "sintel.h264");
File dst = new File(getCacheDir().getAbsolutePath(), "sintel_frame");
decodeVideo(src.getAbsolutePath(), dst.getAbsolutePath());
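For completeness, here is a minimal sketch of the assumed Java-side declaration. The package, class, and method signature are implied by the JNI function name Java_com_example_helloffmpeg_MainActivity_decodeVideo in the native code below; the library name passed to System.loadLibrary is a placeholder for the project's actual native target.

package com.example.helloffmpeg;

public class MainActivity extends android.app.Activity {
    static {
        // Placeholder name: use whatever the project's CMake/ndk-build target is called.
        System.loadLibrary("hello-ffmpeg");
    }

    // Decodes the H264 file at srcFilePath; every 20th frame is saved as "<dstFilePath>-<n>.yuv".
    public native int decodeVideo(String srcFilePath, String dstFilePath);
}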
Native-layer implementation:
//env: Android JNI, C++11, FFmpeg 4.0.
#include <jni.h>
#include <android/log.h>
#include <cerrno>
#include <cstdint>
#include <cstdio>
#include <cstring>
extern "C" {
#include <libavcodec/avcodec.h>
}

#define TAG "decode_video" // log tag (placeholder value)
#define INBUF_SIZE 4096

static void yuv_save(AVFrame *avFrame, char *filename);
static void decode(AVCodecContext *avCodecContext, AVFrame *avFrame, AVPacket *pkt,
                   const char *filename);
extern "C"
JNIEXPORT jint JNICALL
Java_com_example_helloffmpeg_MainActivity_decodeVideo(JNIEnv *env, jobject thiz,
                                                      jstring file_path, jstring dst_file_path) {
    const char *filename = env->GetStringUTFChars(file_path, nullptr);
    const char *outfilename = env->GetStringUTFChars(dst_file_path, nullptr);
    __android_log_write(ANDROID_LOG_ERROR, TAG, filename);
    __android_log_write(ANDROID_LOG_ERROR, TAG, outfilename);

    const AVCodec *avCodec = nullptr;
    AVCodecParserContext *avCodecParserContext = nullptr;
    AVCodecContext *avCodecContext = nullptr;
    FILE *file = nullptr;
    AVFrame *avFrame = nullptr;
    uint8_t inbuf[INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE];
    uint8_t *data = nullptr;
    size_t data_size;
    int ret;
    AVPacket *avPacket = nullptr;

    avPacket = av_packet_alloc();
    if (!avPacket)
        goto end;

    //memset prototype: memset(void *buffer, int c, int count).
    //Zero out inbuf[INBUF_SIZE] and the padding bytes that follow it.
    /* set end of buffer to 0 (this ensures that no overreading happens for damaged MPEG streams) */
    memset(inbuf + INBUF_SIZE, 0, AV_INPUT_BUFFER_PADDING_SIZE);

    //Find the H264 decoder.
    avCodec = avcodec_find_decoder(AV_CODEC_ID_H264);
    if (!avCodec) {
        __android_log_write(ANDROID_LOG_ERROR, TAG, "Codec not found\n");
        goto end;
    }

    avCodecParserContext = av_parser_init(avCodec->id);
    if (!avCodecParserContext) {
        __android_log_write(ANDROID_LOG_ERROR, TAG, "avCodecParserContext not found\n");
        goto end;
    }

    avCodecContext = avcodec_alloc_context3(avCodec);
    if (!avCodecContext) {
        __android_log_write(ANDROID_LOG_ERROR, TAG, "Could not allocate avCodecContext\n");
        goto end;
    }

    /* For some codecs, such as msmpeg4 and mpeg4, width and height
       MUST be initialized there because this information is not
       available in the bitstream. */

    /* open it */
    if (avcodec_open2(avCodecContext, avCodec, nullptr) < 0) {
        __android_log_write(ANDROID_LOG_ERROR, TAG, "Could not open avCodec\n");
        goto end;
    }

    //Open the input file.
    file = fopen(filename, "rbe");
    if (!file) {
        __android_log_write(ANDROID_LOG_ERROR, TAG, "Could not open in file\n");
        __android_log_write(ANDROID_LOG_ERROR, TAG, strerror(errno));
        goto end;
    }

    avFrame = av_frame_alloc();
    if (!avFrame) {
        __android_log_write(ANDROID_LOG_ERROR, TAG, "Could not allocate video avFrame\n");
        goto end;
    }

    while (!feof(file)) {
        /* read raw data from the input file */
        data_size = fread(inbuf, 1, INBUF_SIZE, file);
        if (!data_size)
            break;

        /* use the avCodecParserContext to split the data into frames */
        data = inbuf;
        while (data_size > 0) {
            //Parse an avPacket out of data.
            ret = av_parser_parse2(avCodecParserContext, avCodecContext, &avPacket->data,
                                   &avPacket->size,
                                   data, data_size, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
            if (ret < 0) {
                __android_log_write(ANDROID_LOG_ERROR, TAG, "Error while parsing\n");
                goto end;
            }
            //Advance the data pointer and shrink data_size by the number of bytes consumed.
            data += ret;
            data_size -= ret;

            //Decode the avPacket.
            if (avPacket->size)
                decode(avCodecContext, avFrame, avPacket, outfilename);
        }
    }

    /* flush the decoder */
    decode(avCodecContext, avFrame, nullptr, outfilename);

end:
    env->ReleaseStringUTFChars(file_path, filename);
    env->ReleaseStringUTFChars(dst_file_path, outfilename);
    if (file) fclose(file);
    if (avCodecParserContext) av_parser_close(avCodecParserContext);
    if (avCodecContext) avcodec_free_context(&avCodecContext);
    if (avFrame) av_frame_free(&avFrame);
    if (avPacket) av_packet_free(&avPacket);
    return 0;
}
//Decode frames out of a packet.
static void decode(AVCodecContext *avCodecContext, AVFrame *avFrame, AVPacket *pkt,
                   const char *filename) {
    char buf[1024];
    int ret;

    //Send the packet to the codec.
    ret = avcodec_send_packet(avCodecContext, pkt);
    if (ret < 0) {
        __android_log_print(ANDROID_LOG_ERROR, TAG, "Error sending a packet for decoding: %s\n",
                            av_err2str(ret));
        return;
    }

    while (ret >= 0) {
        //Receive a decoded frame into the avFrame parameter.
        ret = avcodec_receive_frame(avCodecContext, avFrame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            return;
        } else if (ret < 0) {
            __android_log_write(ANDROID_LOG_ERROR, TAG, "Error during decoding\n");
            return;
        }

        //To keep the number of output files manageable, save only every 20th avFrame.
        if (avCodecContext->frame_number % 20 == 0) {
            __android_log_print(ANDROID_LOG_ERROR, TAG, "saving avFrame %3d\n",
                                avCodecContext->frame_number);
            /* the picture is allocated by the decoder. no need to free it */
            //Build the output file name.
            //C library function: int snprintf(char *str, size_t size, const char *format, ...).
            //It formats the variable arguments according to format and copies the result into str,
            //writing at most size characters; anything longer is truncated.
            snprintf(buf, sizeof(buf), "%s-%d.yuv", filename, avCodecContext->frame_number);
            yuv_save(avFrame, buf);
        }
    }
}
//Save an avFrame as a YUV file.
static void yuv_save(AVFrame *avFrame, char *filename) {
    FILE *file;
    file = fopen(filename, "we");
    if (!file) {
        __android_log_write(ANDROID_LOG_ERROR, TAG, "Could not open out file\n");
        __android_log_write(ANDROID_LOG_ERROR, TAG, strerror(errno));
        return;
    }

    int width = avFrame->width;
    int height = avFrame->height;
    //Write each plane row by row, skipping the linesize padding at the end of every row.
    for (int i = 0; i < height; i++)
        fwrite(avFrame->data[0] + i * avFrame->linesize[0], 1, width, file);
    for (int j = 0; j < height / 2; j++)
        fwrite(avFrame->data[1] + j * avFrame->linesize[1], 1, width / 2, file);
    for (int k = 0; k < height / 2; k++)
        fwrite(avFrame->data[2] + k * avFrame->linesize[2], 1, width / 2, file);
    fclose(file);
}
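The row-by-row copy in yuv_save is exactly what the linesize explanation at the top describes: only width bytes of each row are written, so the padding never reaches the file. As an aside, libavutil can strip that padding for you. The sketch below is not part of the original project; the function name yuv_save_with_imgutils is made up, and it assumes the same Android/FFmpeg 4.0 environment and TAG-less error handling for brevity.

extern "C" {
#include <libavutil/imgutils.h>
#include <libavutil/mem.h>
}

static void yuv_save_with_imgutils(AVFrame *avFrame, const char *filename) {
    auto pix_fmt = (AVPixelFormat) avFrame->format;
    //Tightly packed size with align = 1 (width * height * 3 / 2 for YUV420P).
    int size = av_image_get_buffer_size(pix_fmt, avFrame->width, avFrame->height, 1);
    if (size < 0)
        return;

    auto *buffer = (uint8_t *) av_malloc(size);
    if (!buffer)
        return;

    //Copy the planes into one contiguous buffer, dropping the per-row linesize padding.
    int ret = av_image_copy_to_buffer(buffer, size,
                                      (const uint8_t *const *) avFrame->data, avFrame->linesize,
                                      pix_fmt, avFrame->width, avFrame->height, 1);
    if (ret >= 0) {
        FILE *file = fopen(filename, "we");
        if (file) {
            fwrite(buffer, 1, ret, file);
            fclose(file);
        }
    }
    av_free(buffer);
}

Either version writes width * height * 3 / 2 bytes per YUV420P frame (for example, 1280x720 gives 1382400 bytes), and the saved files can be inspected with ffplay -f rawvideo -pixel_format yuv420p -video_size <width>x<height> <file>.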