A video player based on FFmpeg + OpenCV that decodes H.264 (with clear comments)

This code is based on Lei Xiaohua's (雷神) blog post:
[总结]FFMPEG视音频编解码零基础学习方法 ("FFmpeg audio/video encoding and decoding, learning from scratch")

#include <stdio.h>
#include <iostream>
#include <fstream>
#include <opencv2/opencv.hpp>

#define __STDC_CONSTANT_MACROS

extern "C" {
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavutil/avutil.h"
#include "libavutil/imgutils.h"
};

#pragma  comment(lib,"avcodec.lib")
#pragma  comment(lib,"avdevice.lib")
#pragma  comment(lib,"avfilter.lib")
#pragma  comment(lib,"avformat.lib")
#pragma  comment(lib,"avutil.lib")
#pragma  comment(lib,"postproc.lib")
#pragma  comment(lib,"swresample.lib")
#pragma  comment(lib,"swscale.lib")

using namespace std;
using namespace cv;

#define OUTPUT_YUV420P 0
#define SWS_BICUBIC 4
//SDL is not actually used in this program; these macros were defined by hand instead of including SDL.h
#define SDL_INIT_VIDEO 0x00000020u
#define SDL_INIT_AUDIO 0x00000010u
#define SDL_INIT_TIMER 0x00000001u
#define SDL_WINDOWPOS_UNDEFINED (0x1FFF0000|0)

void DisplayYUV(int w, int h, int fps, FILE* pFileIn)
{
	printf("yuv file w: %d, h: %d \n", w, h);

	//seek to the end of the file
	fseek(pFileIn, 0, SEEK_END);
	int frame_count = 0;
	
	//frame count: for an M x N (rows x cols) BGR image (CV_8UC3), the corresponding YUV420 image is (3M/2) x N (CV_8UC1)
	//frame_count = (int)((long long)ftell(pFileIn) / ((w * h * 3) / 2));  // ftell gives the file size, but fails for files larger than ~2.1 GB
	//printf("frame num is %d \n", frame_count);
	fseek(pFileIn, 0, SEEK_SET);	//seek back to the beginning of the file

	int bufLen = w * h * 3 / 2;
	unsigned char* pYuvBuf = new unsigned char[bufLen];

	for (int i = 0; i < 14315/*frame_count*/; i++)	//14315 is hard-coded for the author's test clip; use frame_count for other files
	{
		fread(pYuvBuf, bufLen * sizeof(unsigned char), 1, pFileIn);

		Mat yuvImg;
		yuvImg.create(h * 3 / 2, w, CV_8UC1);
		memcpy(yuvImg.data, pYuvBuf, bufLen * sizeof(unsigned char));
		Mat rgbImg;
		//color space conversion: planar YUV420 (I420) -> BGR
		cvtColor(yuvImg, rgbImg, COLOR_YUV2BGR_I420);

		//imshow("yuv", yuvImg);
		imshow("rgb", rgbImg);
		waitKey(1000 / fps);

		printf("cnt: %d \n", i);
	}

	delete[] pYuvBuf;


	fclose(pFileIn);
}

int main(int argc, char* argv[]) {

	AVFormatContext* pFormatCtx;
	int i, videoindex;
	AVCodecContext* pCodecCtx;

	AVCodec* pCodec;
	//holds the decoded frame and the YUV420P frame used for output
	AVFrame* pFrame, * pFrameYUV;
	unsigned char* out_buffer;
	//holds the encoded H.264 packets read from the stream
	AVPacket* packet;
	struct SwsContext* img_convert_ctx;

	int y_size;

	FILE* fp_yuv;
	int ret, got_picture;

	char filepath[] = "c:/users/37075/source/repos/ffmpeg_learn/h264/output2.h264";

	//initialization (av_register_all is deprecated since FFmpeg 4.0; see the note after the code)
	av_register_all();
	avformat_network_init();
	pFormatCtx = avformat_alloc_context();

	//open the input file
	if (avformat_open_input(&pFormatCtx, filepath, NULL, NULL) != 0) {
		printf("Couldn't open input stream.\n");
		return -1;
	}
	//retrieve stream information
	if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
		printf("Couldn't find stream information.\n");
		return -1;
	}
	videoindex = -1;
	for (i = 0; i < pFormatCtx->nb_streams; i++)
		//check whether this stream is a video stream
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			videoindex = i;
			break;
		}

	if (videoindex == -1) {
		printf("Didn't find a video stream.\n");
		return -1;
	}

	//get the codec context of the video stream and find a matching decoder
	pCodecCtx = pFormatCtx->streams[videoindex]->codec;
	if ((pCodec = avcodec_find_decoder(pCodecCtx->codec_id)) == NULL) {
		printf("Codec not found.\n");
		return -1;
	}
	//open the decoder
	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
		printf("Could not open codec.\n");
		return -1;
	}

	FILE* fp = fopen("c:/users/37075/source/repos/ffmpeg_learn/info/output.txt", "wb+");

	fprintf(fp, "Duration: %d\n", pFormatCtx->duration);
	fprintf(fp, "Long Name: %s\n", pFormatCtx->iformat->long_name);
	fprintf(fp, "Width*Height: %d*%d\n", pFormatCtx->streams[videoindex]->codec->width, pFormatCtx->streams[videoindex]->codec->height);

	fclose(fp);

	//allocate the AVFrame structures
	pFrame = av_frame_alloc();
	pFrameYUV = av_frame_alloc();

	//allocate the output buffer for the converted YUV420P image
	out_buffer = (unsigned char*)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1));

	//point pFrameYUV->data and linesize into out_buffer
	av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, out_buffer, AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1);
	packet = (AVPacket*)av_malloc(sizeof(AVPacket));
	printf("--------------- File Information ----------------\n");
	// Dump information about file onto standard error
	av_dump_format(pFormatCtx, 0, filepath, 0);
	printf("-------------------------------------------------\n");
	img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);

	fp_yuv = fopen("c:/users/37075/source/repos/ffmpeg_learn/yuv/output.yuv", "wb+");

	//FILE* fp_h264 = fopen("c:/users/37075/source/repos/ffmpeg_learn/h264/test.h264", "wb+");
	//read the whole stream, decode each packet into a frame, convert it to YUV420P and save it
	//decoding takes an AVPacket as input and produces an AVFrame (a sketch after the listing shows how to display each frame directly with OpenCV instead of going through a file)
	while (av_read_frame(pFormatCtx, packet) >= 0) {
		// Is this a packet from the video stream?
		if (packet->stream_index == videoindex) {
			//fwrite(packet->data, 1, packet->size, fp_h264);
			//decode the packet (avcodec_decode_video2 is deprecated since FFmpeg 3.1; see the send/receive sketch after the code)
			ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
			if (ret < 0) {
				printf("Decode Error.\n");
				return -1;
			}
			if (got_picture) {
				sws_scale(img_convert_ctx, (const unsigned char* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
					pFrameYUV->data, pFrameYUV->linesize);
				y_size = pCodecCtx->width * pCodecCtx->height;
				fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv); //Y
				fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv);	//U
				fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv);	//V
			}
		}
		av_free_packet(packet);	//deprecated in newer FFmpeg; av_packet_unref() is the replacement
	}

	//fclose(fp_h264);

	//while (1) {
	//	ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
	//	if (ret < 0) break;
	//	if (got_picture) break;
	//	sws_scale(img_convert_ctx, (const unsigned char* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
	//		pFrameYUV->data, pFrameYUV->linesize);
	//	y_size = pCodecCtx->width * pCodecCtx->height;
	//	fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv); //Y
	//	fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv); //U
	//	fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv); //V
	//}

	//ifstream fp_in;
	//fp_in.open("c:/users/37075/source/repos/ffmpeg_learn/yuv/output.yuv", ios_base::in | ios_base::binary);
	//if (fp_in.fail()) {
	//	cout << "the file is error" << endl;
	//	return -1;
	//}

	sws_freeContext(img_convert_ctx);
	
	DisplayYUV(pCodecCtx->width, pCodecCtx->height, 25, fp_yuv);

	fclose(fp_yuv);
	
	av_frame_free(&pFrameYUV);
	av_frame_free(&pFrame);
	avcodec_close(pCodecCtx);
	avformat_close_input(&pFormatCtx);

	return 0;
}
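
The listing above uses the old FFmpeg API: av_register_all, the stream->codec field, avcodec_decode_video2 and av_free_packet are all deprecated since FFmpeg 3.1/4.0 and removed from current releases. Below is a minimal sketch of the same read-decode-convert loop rewritten with avcodec_send_packet / avcodec_receive_frame. It assumes the surrounding setup (pFormatCtx, pCodecCtx, packet, pFrame, pFrameYUV, img_convert_ctx, fp_yuv, videoindex) is kept exactly as in the code above; in a full port, pCodecCtx would additionally be created with avcodec_alloc_context3 and filled from streams[videoindex]->codecpar via avcodec_parameters_to_context.

	while (av_read_frame(pFormatCtx, packet) >= 0) {
		if (packet->stream_index == videoindex) {
			//feed the encoded packet to the decoder
			if (avcodec_send_packet(pCodecCtx, packet) < 0)
				break;
			//one packet may produce zero or more decoded frames
			while (avcodec_receive_frame(pCodecCtx, pFrame) == 0) {
				sws_scale(img_convert_ctx, (const unsigned char* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
					pFrameYUV->data, pFrameYUV->linesize);
				int y_size = pCodecCtx->width * pCodecCtx->height;
				fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv);		//Y
				fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv);	//U
				fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv);	//V
			}
		}
		av_packet_unref(packet);	//replaces the deprecated av_free_packet
	}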

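The program also takes a detour through output.yuv: every frame is written to disk and only afterwards read back and displayed by DisplayYUV. Below is a minimal sketch of showing each frame as soon as it is decoded; it would go inside the if (got_picture) block right after sws_scale. It relies on the fact that av_image_fill_arrays was called with align = 1, so out_buffer holds the three YUV420P planes back to back and can be wrapped in a single-channel Mat of height h*3/2 (the same layout DisplayYUV assumes); the window name "decoded" is arbitrary.

	//wrap the packed YUV420P buffer in a Mat (no copy) and convert to BGR for display
	Mat yuvImg(pCodecCtx->height * 3 / 2, pCodecCtx->width, CV_8UC1, out_buffer);
	Mat bgrImg;
	cvtColor(yuvImg, bgrImg, COLOR_YUV2BGR_I420);
	imshow("decoded", bgrImg);
	waitKey(1);		//use waitKey(1000 / fps) instead for paced playback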