1. Find the video stream.
2. Allocate a decoder context and copy the stream's codec parameters into it.
3. Open the decoder.
4. Convert the pixel format.
5. Read packets in a loop and decode them with avcodec_send_packet()/avcodec_receive_frame() (see the sketch after this list).
6. Convert each decoded frame to RGB data and save it.
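
The send/receive pair in step 5 is the core of the decoding loop: one packet may produce zero or several frames, so the frames should be drained in an inner loop. A minimal sketch, assuming dec_ctx, pkt and frame have already been allocated and the decoder opened (for illustration only; it is not part of the program below):

static int decode_packet(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame)
{
    // Feed one compressed packet to the decoder; a NULL packet flushes it.
    int ret = avcodec_send_packet(dec_ctx, pkt);
    if (ret < 0)
        return ret;
    // Drain every frame the decoder can produce from this packet.
    while (ret >= 0) {
        ret = avcodec_receive_frame(dec_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;            // needs more input / fully drained
        if (ret < 0)
            return ret;          // real decoding error
        // frame now holds one decoded picture; convert/save it here.
        av_frame_unref(frame);
    }
    return 0;
}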
File operations
Related APIs
#include <stdio.h>
#include <stdlib.h>
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
#include <libavutil/imgutils.h>
#include <libavutil/mem.h>
#define INBUF_SIZE 4096
#define WORD uint16_t
#define DWORD uint32_t
#define LONG int32_t
// BMP headers are not available outside Windows, so define them here.
// They must be 2-byte packed, otherwise sizeof(BITMAPFILEHEADER) is 16
// instead of 14 and the resulting file is not a valid BMP.
#pragma pack(push, 2)
typedef struct tagBITMAPFILEHEADER {
    WORD  bfType;
    DWORD bfSize;
    WORD  bfReserved1;
    WORD  bfReserved2;
    DWORD bfOffBits;
} BITMAPFILEHEADER, *PBITMAPFILEHEADER;

typedef struct tagBITMAPINFOHEADER {
    DWORD biSize;
    LONG  biWidth;
    LONG  biHeight;
    WORD  biPlanes;
    WORD  biBitCount;
    DWORD biCompression;
    DWORD biSizeImage;
    LONG  biXPelsPerMeter;
    LONG  biYPelsPerMeter;
    DWORD biClrUsed;
    DWORD biClrImportant;
} BITMAPINFOHEADER, *PBITMAPINFOHEADER;
#pragma pack(pop)
// Convert a decoded frame to BGR24 and save it as a BMP file.
void saveBMP(struct SwsContext *img_convert_ctx, AVFrame *frame, char *filename)
{
    // 1. Convert the decoded frame (e.g. YUV420P) to BGR24, the byte order BMP expects.
    int w = frame->width;
    int h = frame->height;

    AVFrame *pFrameRGB = av_frame_alloc();
    int numBytes = av_image_get_buffer_size(AV_PIX_FMT_BGR24, w, h, 1);
    // Buffer holding one frame of BGR24 pixel data.
    uint8_t *buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));
    av_image_fill_arrays(pFrameRGB->data, pFrameRGB->linesize, buffer, AV_PIX_FMT_BGR24, w, h, 1);
    sws_scale(img_convert_ctx, (const uint8_t * const *)frame->data, frame->linesize,
              0, h, pFrameRGB->data, pFrameRGB->linesize);

    // 2. Fill in the BITMAPINFOHEADER.
    BITMAPINFOHEADER header;
    header.biSize = sizeof(BITMAPINFOHEADER);
    header.biWidth = w;
    header.biHeight = h * (-1);  // negative height: top-down bitmap
    header.biBitCount = 24;
    header.biCompression = 0;
    header.biSizeImage = 0;
    header.biClrImportant = 0;
    header.biClrUsed = 0;
    header.biXPelsPerMeter = 0;
    header.biYPelsPerMeter = 0;
    header.biPlanes = 1;

    // 3. Fill in the BITMAPFILEHEADER.
    BITMAPFILEHEADER bmpFileHeader = {0,};
    bmpFileHeader.bfType = 0x4d42; // 'BM'
    bmpFileHeader.bfSize = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER) + numBytes;
    bmpFileHeader.bfOffBits = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER);

    FILE *pf = fopen(filename, "wb");
    fwrite(&bmpFileHeader, sizeof(BITMAPFILEHEADER), 1, pf);
    fwrite(&header, sizeof(BITMAPINFOHEADER), 1, pf);
    fwrite(pFrameRGB->data[0], 1, numBytes, pf);
    fclose(pf);

    // Free resources: the frame wrapper and the pixel buffer it points to.
    av_frame_free(&pFrameRGB);
    av_free(buffer);
}
static int decode_write_frame(const char *out_filename, AVCodecContext *avctx, struct SwsContext *img_convert_ctx, AVFrame *frame, int *frame_out, AVPacket *pkt, int last)
{
    int ret;
    char buf[1024];

    // Feed the compressed packet to the decoder (an empty packet flushes it).
    ret = avcodec_send_packet(avctx, pkt);
    if (ret < 0) {
        fprintf(stderr, "Send video packet failed: %d\n", ret);
        return ret;
    }
    // Receive the decoded frame back from the decoder.
    ret = avcodec_receive_frame(avctx, frame);
    if (ret < 0) {
        // EAGAIN/EOF just mean no frame is available yet; anything else is an error.
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;
        fprintf(stderr, "Receive video frame failed: %d\n", ret);
        return ret;
    }
    fflush(stdout);
    // Build a per-frame output name (one simple scheme: "<out_filename>-<n>.bmp") and save it.
    snprintf(buf, sizeof(buf), "%s-%d.bmp", out_filename, *frame_out);
    saveBMP(img_convert_ctx, frame, buf);
    (*frame_out)++;
    return 0;
}
// Decode a video file into per-frame BMP images.
int decode_video(const char *filename, const char *outfilename)
{
    int ret;
    AVFormatContext *fmt_ctx = NULL;
    const AVCodec *codec;
    AVCodecContext *c = NULL;
    AVStream *st = NULL;
    int stream_index;
    int frame_count;
    AVFrame *frame;                     // holds the raw, decoded picture
    struct SwsContext *img_convert_ctx; // used for pixel-format/size conversion
    AVPacket pkt;                       // holds the compressed, encoded data

    // Register all supported formats and codecs (deprecated and a no-op since FFmpeg 4.0).
    av_register_all();

    // Open the input file; this allocates fmt_ctx.
    ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not open source file %s\n", filename);
        return ret;
    }
    // Read stream information.
    ret = avformat_find_stream_info(fmt_ctx, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not find stream info %s\n", filename);
        return ret;
    }
    // Dump information about the input file.
    av_dump_format(fmt_ctx, 0, filename, 0);

    av_init_packet(&pkt);
    // Find the best video stream.
    ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not find %s stream in input file '%s'\n", av_get_media_type_string(AVMEDIA_TYPE_VIDEO), filename);
        return ret;
    }
    // av_find_best_stream() returns the stream index on success.
    stream_index = ret;
    st = fmt_ctx->streams[stream_index];

    // Find the decoder matching the stream's codec id.
    codec = avcodec_find_decoder(st->codecpar->codec_id);
    if (!codec) {
        fprintf(stderr, "Failed to find %s codec\n", av_get_media_type_string(AVMEDIA_TYPE_VIDEO));
        return AVERROR(EINVAL);
    }
    // Allocate the decoder context.
    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate video codec context\n");
        return -1;
    }
    // Copy the codec parameters from the stream into the decoder context.
    ret = avcodec_parameters_to_context(c, st->codecpar);
    if (ret < 0) {
        fprintf(stderr, "Failed to copy %s parameters to decoder context\n", av_get_media_type_string(AVMEDIA_TYPE_VIDEO));
        return ret;
    }
    // Open the decoder.
    ret = avcodec_open2(c, codec, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not open codec\n");
        return ret;
    }
    // swscale converts between pixel formats (e.g. YUV420P to BGR24) and can also rescale.
    // The destination format must match what saveBMP() expects, i.e. BGR24 for a BMP file.
    img_convert_ctx = sws_getContext(c->width, c->height, c->pix_fmt, c->width, c->height, AV_PIX_FMT_BGR24, SWS_BICUBIC, NULL, NULL, NULL);
    if (img_convert_ctx == NULL) {
        fprintf(stderr, "Cannot initialize the conversion context\n");
        return -1;
    }
    // Allocate the AVFrame that will receive decoded pictures.
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        return -1;
    }
    frame_count = 0;
    // Read packets from the input file one by one (av_read_frame() returns 0 on success).
    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        if (pkt.stream_index == stream_index) {
            ret = decode_write_frame(outfilename, c, img_convert_ctx, frame, &frame_count, &pkt, 0);
        }
        av_packet_unref(&pkt);
        if (ret < 0)
            break;
    }
    // Flush the decoder with an empty packet to drain any buffered frames.
    pkt.data = NULL;
    pkt.size = 0;
    decode_write_frame(outfilename, c, img_convert_ctx, frame, &frame_count, &pkt, 1);

    av_frame_free(&frame);
    avformat_close_input(&fmt_ctx);
    sws_freeContext(img_convert_ctx);
    avcodec_free_context(&c);
    return 0;
}