A lot of the code for this floating around online is broken!!!
Without further ado, here is the code:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <iostream>
#define __STDC_CONSTANT_MACROS
#ifdef _WIN32
#define snprintf _snprintf
//Windows
extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavdevice/avdevice.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
#include "libswscale/swscale.h"
#include "libavutil/avutil.h"
#include "libavutil/imgutils.h"
#include "SDL.h"
};
#else
//Linux...
#ifdef __cplusplus
extern "C"
{
#endif
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavutil/avutil.h>
#include <libavutil/imgutils.h>
#include "SDL.h"
#ifdef __cplusplus
};
#endif
#endif
#define ENABLE_SDL 1
#define ENABLE_YUV 0
#define REFRESH_EVENT (SDL_USEREVENT + 1)
#define BREAK_EVENT (SDL_USEREVENT + 2)
#define FRAMEWITH 640
#define FRAMEHEIGTH 480
#define BGWIDTH 800
#define BGHEIGHT 600
static int thread_exit = 0;
int refresh_video1(void* opaque) {
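//Timer thread: push a REFRESH_EVENT roughly every 40 ms (about 25 fps) until thread_exit is set, then signal BREAK_EVENT.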
thread_exit = 0;
while (!thread_exit) {
SDL_Event event;
event.type = REFRESH_EVENT;
SDL_PushEvent(&event);
SDL_Delay(40);
}
thread_exit = 0;
SDL_Event event;
event.type = BREAK_EVENT;
SDL_PushEvent(&event);
return 0;
}
int main(int argc, char* argv[])
{
AVFormatContext* pFormatCtx;
int i, videoindex;
AVCodecContext* pCodecCtx;
AVCodec* pCodec;
AVFrame* pFrame, * pFrameYUV, * pDstFrame;
uint8_t* out_buffer;
AVPacket* packet;
int y_size;
int ret, got_picture;
struct SwsContext* img_convert_ctx = nullptr;
//Input file path
//char filepath[] = "test_640x480.mp4";
char filepath[] = "C:/Users/Li/Desktop/test.h264";
int frame_cnt;
using namespace std;
av_register_all();
avformat_network_init();
pFormatCtx = avformat_alloc_context();
if (avformat_open_input(&pFormatCtx, filepath, NULL, NULL) != 0) {
printf("Couldn't open input stream.\n");
return -1;
}
if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
printf("Couldn't find stream information.\n");
return -1;
}
videoindex = -1;
for (i = 0; i < pFormatCtx->nb_streams; i++)
if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
videoindex = i;
break;
}
if (videoindex == -1) {
printf("Didn't find a video stream.\n");
return -1;
}
//pCodecCtx = pFormatCtx->streams[videoindex]->codec;
//pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
pCodec = avcodec_find_decoder(AV_CODEC_ID_H264);
if (pCodec == NULL) {
printf("Codec not found.\n");
return -1;
}
pCodecCtx = avcodec_alloc_context3(pCodec);
if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
printf("Could not open codec.\n");
return -1;
}
/*
* Add code here to print the video information,
* taken from pFormatCtx, using fprintf()
*/
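//A minimal sketch for that exercise (caveat: on a raw .h264 elementary
//stream, duration and bit_rate are often unset or AV_NOPTS_VALUE):
fprintf(stderr, "nb_streams=%u duration=%lld bit_rate=%lld\n",
pFormatCtx->nb_streams, (long long)pFormatCtx->duration, (long long)pFormatCtx->bit_rate);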
pFrame = av_frame_alloc();
pFrameYUV = av_frame_alloc();
out_buffer = new uint8_t[av_image_get_buffer_size(AV_PIX_FMT_YUV420P, FRAMEWITH, FRAMEHEIGTH, 1)];
//avpicture_fill((AVPicture*)pFrameYUV, out_buffer, AV_PIX_FMT_YUV420P, FRAMEWITH, FRAMEHEIGTH);
av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, out_buffer, AV_PIX_FMT_YUV420P, FRAMEWITH, FRAMEHEIGTH, 1);
pDstFrame = av_frame_alloc();
//int nDstSize = avpicture_get_size(AV_PIX_FMT_YUV420P, FRAMEWITH * 2, FRAMEHEIGTH);
int nDstSize = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, BGWIDTH, BGHEIGHT, 1);
uint8_t* dstbuf = new uint8_t[nDstSize];
//avpicture_fill((AVPicture*)pDstFrame, dstbuf, AV_PIX_FMT_YUV420P, FRAMEWITH * 2, FRAMEHEIGTH);
av_image_fill_arrays(pDstFrame->data, pDstFrame->linesize, dstbuf, AV_PIX_FMT_YUV420P, BGWIDTH, BGHEIGHT, 1);
//pDstFrame->width = FRAMEWITH * 2;
//pDstFrame->height = FRAMEHEIGTH;
pDstFrame->width = BGWIDTH;
pDstFrame->height = BGHEIGHT;
pDstFrame->format = AV_PIX_FMT_YUV420P;
//Fill the pre-allocated background AVFrame with a solid black background
memset(pDstFrame->data[0], 0x00, BGWIDTH * BGHEIGHT);
memset(pDstFrame->data[1], 0x80, BGWIDTH * BGHEIGHT / 4);
memset(pDstFrame->data[2], 0x80, BGWIDTH * BGHEIGHT / 4);
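//(Luma 0x00 with chroma 0x80, the unsigned zero point, is pure black in YUV.)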
//memset(pDstFrame->data[0], 0, BGWIDTH * BGHEIGHT * 3);
//memset(pDstFrame->data[1], 0, BGWIDTH * BGHEIGHT);
//memset(pDstFrame->data[2], 0, BGWIDTH * BGHEIGHT);
//packet = (AVPacket*)av_malloc(sizeof(AVPacket));
packet = av_packet_alloc();
//Output Info-----------------------------
//printf("--------------- File Information ----------------\n");
//av_dump_format(pFormatCtx, 0, filepath, 0);
//printf("-------------------------------------------------\n");
#if ENABLE_SDL
ret = SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER);
if (ret) {
printf("Could not initialize SDL - %s\n", SDL_GetError());
return -10;
}
SDL_Window* screen = SDL_CreateWindow("Simple Video Play SDL2", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
BGWIDTH, BGHEIGHT, SDL_WINDOW_OPENGL | SDL_WINDOW_RESIZABLE);
if (!screen) {
printf("SDL: could not create window - exiting:%s\n", SDL_GetError());
return -11;
}
SDL_Renderer* renderer = SDL_CreateRenderer(screen, -1, 0);
if (!renderer) {
printf("SDL : could not create renderer - exiting:&s\n", SDL_GetError());
return -12;
}
SDL_Texture* texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,
BGWIDTH, BGHEIGHT);
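//SDL_PIXELFORMAT_IYUV is SDL's name for planar YUV420P, so pDstFrame's planes can be uploaded directly.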
SDL_Rect rect;
rect.x = 0;
rect.y = 0;
rect.h = BGHEIGHT;
rect.w = BGWIDTH;
SDL_Thread* refreshThread = SDL_CreateThread(refresh_video1, "refresh_thread", NULL);
SDL_Event event;
#endif
int count = 0;
frame_cnt = 0;
#if ENABLE_YUV
//FILE *fp_h264 = fopen("test264.h264", "wb+");
//FILE* fp_yuv420 = fopen("test_yuv420p(411)_1280x480.yuv", "wb+");
FILE* fp_yuv420 = fopen("test_yuv420p_800x600.yuv", "wb+");
#endif
while (av_read_frame(pFormatCtx, packet) >= 0) {
#if ENABLE_SDL
SDL_WaitEvent(&event);
if (event.type == REFRESH_EVENT) {
#endif
if (packet->stream_index == videoindex) {
/*
* Add code here to dump the raw H264 stream,
* taken from packet, using fwrite()
*/
//fwrite(packet->data,1,packet->size,fp_h264);
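//Copy the stream's codec parameters (dimensions, pixel format and the SPS/PPS extradata) into our decoder context so the decoder can parse the bitstream.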
pCodecCtx->width = pFormatCtx->streams[videoindex]->codec->width;
pCodecCtx->height = pFormatCtx->streams[videoindex]->codec->height;
pCodecCtx->pix_fmt = pFormatCtx->streams[videoindex]->codec->pix_fmt;
pCodecCtx->extradata = pFormatCtx->streams[videoindex]->codec->extradata;
pCodecCtx->extradata_size = pFormatCtx->streams[videoindex]->codec->extradata_size;
ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
if (ret < 0) {
printf("Decode Error.\n");
return -1;
}
if (got_picture) {
if (!img_convert_ctx) {
img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
FRAMEWITH, FRAMEHEIGTH, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
}
sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
pFrameYUV->data, pFrameYUV->linesize);
printf("Decoded frame index: %d\n", frame_cnt);
//if (pFrameYUV)
//{
// int nYIndex = 0;
// int nUVIndex = 0;
// for (int i = 0; i < FRAMEHEIGTH; i++)
// {
// //Y
// memcpy(pDstFrame->data[0] + i * BGWIDTH, pFrameYUV->data[0] + nYIndex * FRAMEWITH, FRAMEWITH);
// memcpy(pDstFrame->data[0] + FRAMEWITH + i * BGWIDTH, pFrameYUV->data[0] + nYIndex * FRAMEWITH, FRAMEWITH);
// nYIndex++;
// }
// for (int i = 0; i < FRAMEHEIGTH / 4; i++)
// {
// //U
// memcpy(pDstFrame->data[1] + i * BGWIDTH, pFrameYUV->data[1] + nUVIndex * FRAMEWITH, FRAMEWITH);
// memcpy(pDstFrame->data[1] + FRAMEWITH + i * BGWIDTH, pFrameYUV->data[1] + nUVIndex * FRAMEWITH, FRAMEWITH);
// //V
// memcpy(pDstFrame->data[2] + i * BGWIDTH, pFrameYUV->data[2] + nUVIndex * FRAMEWITH, FRAMEWITH);
// memcpy(pDstFrame->data[2] + FRAMEWITH + i * BGWIDTH, pFrameYUV->data[2] + nUVIndex * FRAMEWITH, FRAMEWITH);
// nUVIndex++;
// }
//}
//if (pFrameYUV) {
//for (int i = 80; i < FRAMEHEIGTH; ++i) {
//memcpy(pDstFrame->data[0] + (i)*BGWIDTH*3 + 240, pFrameYUV->data[0] + i * FRAMEWITH*3, FRAMEWITH * 3);
// memcpy(pDstFrame->data[1] + (i)*BGWIDTH, pFrameYUV->data[1] + i * FRAMEWITH, FRAMEWITH);
// memcpy(pDstFrame->data[2] + (i)*BGWIDTH, pFrameYUV->data[2] + i * FRAMEWITH, FRAMEWITH);
//}
//}
//The overlay section, the key part!!!
//Copy the whole 640x480 frame into the 800x600 background at pixel offset (80, 80).
if (pFrameYUV) {
int nYIndex = 0;
int nUVIndex = 0;
for (int i = 80; i < 80 + FRAMEHEIGTH; ++i) {
//Y
memcpy(pDstFrame->data[0] + (i) * BGWIDTH + 80, pFrameYUV->data[0] + nYIndex * FRAMEWITH, FRAMEWITH);
++nYIndex;
}
for (int i = 40; i < 40 + FRAMEHEIGTH / 2; ++i) {
//U
memcpy(pDstFrame->data[1] + (i) * (BGWIDTH / 2) + 40, pFrameYUV->data[1] + nUVIndex * (FRAMEWITH / 2), FRAMEWITH / 2);
//V
memcpy(pDstFrame->data[2] + (i) * (BGWIDTH / 2) + 40, pFrameYUV->data[2] + nUVIndex * (FRAMEWITH / 2), FRAMEWITH / 2);
++nUVIndex;
}
}
#if ENABLE_SDL
SDL_UpdateYUVTexture(texture, &rect,
pDstFrame->data[0], pDstFrame->linesize[0],
pDstFrame->data[1], pDstFrame->linesize[1],
pDstFrame->data[2], pDstFrame->linesize[2]);
//SDL_UpdateTexture(texture, &rect,
// pDstFrame->data[0], pDstFrame->linesize[0]);
SDL_RenderClear(renderer);
SDL_RenderCopy(renderer, texture, NULL, &rect);
SDL_RenderPresent(renderer);
#endif
//fwrite(pDstFrame->data[0], 1, FRAMEWITH * FRAMEHEIGTH * 2, fp_yuv420);
//fwrite(pDstFrame->data[1], 1, FRAMEWITH * FRAMEHEIGTH / 2, fp_yuv420);
//fwrite(pDstFrame->data[2], 1, FRAMEWITH * FRAMEHEIGTH / 2, fp_yuv420);
#if ENABLE_YUV
fwrite(pDstFrame->data[0], 1, BGWIDTH * BGHEIGHT, fp_yuv420);
fwrite(pDstFrame->data[1], 1, BGWIDTH * BGHEIGHT / 4, fp_yuv420);
fwrite(pDstFrame->data[2], 1, BGWIDTH * BGHEIGHT / 4, fp_yuv420);
#endif
frame_cnt++;
}
}
#if ENABLE_SDL
} else {
break;
}
#endif
count++;
av_frame_unref(pFrame);
av_packet_unref(packet);
}
#if ENABLE_YUV
//fclose(fp_h264);
fclose(fp_yuv420);
#endif
#if ENABLE_SDL
thread_exit = 1; //stop the refresh thread before tearing SDL down
SDL_WaitThread(refreshThread, NULL);
SDL_DestroyTexture(texture);
SDL_DestroyRenderer(renderer);
SDL_DestroyWindow(screen);
SDL_Quit();
#endif
sws_freeContext(img_convert_ctx);
av_packet_free(&packet);
av_frame_free(&pFrameYUV);
av_frame_free(&pFrame);
av_frame_free(&pDstFrame);
pCodecCtx->extradata = NULL; //this pointer is borrowed from the stream's codec context; avoid a double free
pCodecCtx->extradata_size = 0;
avcodec_free_context(&pCodecCtx);
avformat_close_input(&pFormatCtx);
delete[] out_buffer;
delete[] dstbuf;
return 0;
}
//YUV420P stores its data plane by plane: y1y2y3...u1u2u3...v1v2v3
//Chroma is sampled at half rate in both the horizontal and vertical directions, so each 2x2 group of four Y samples shares one U and one V value; the U and V planes each carry 1/4 as much data as the Y plane.
int nYIndex = 0;
int nUVIndex = 0;
for (int i = 80; i < 80 + FRAMEHEIGTH; ++i) {
//Y
memcpy(pDstFrame->data[0] + (i) * BGWIDTH + 80, pFrameYUV->data[0] + nYIndex * FRAMEWITH, FRAMEWITH);
++nYIndex;
}
//Two loops are written on purpose, to make the YUV420P layout easier to follow: the U and V components must be halved in both the horizontal and the vertical direction; quartering any single dimension alone will corrupt the picture.
for (int i = 40; i < 40 + FRAMEHEIGTH / 2; ++i) {
//U
memcpy(pDstFrame->data[1] + (i) * (BGWIDTH / 2) + 40, pFrameYUV->data[1] + nUVIndex * (FRAMEWITH / 2), FRAMEWITH / 2);
//V
memcpy(pDstFrame->data[2] + (i) * (BGWIDTH / 2) + 40, pFrameYUV->data[2] + nUVIndex * (FRAMEWITH / 2), FRAMEWITH / 2);
++nUVIndex;
}
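The two loops above can also be wrapped into a reusable helper. Below is a minimal self-contained sketch of the same idea (my own addition, not part of the program above; the name blit_yuv420p is made up):

#include <stdint.h>
#include <string.h>

//Hypothetical helper: copy a src_w x src_h YUV420P image into a larger
//YUV420P canvas at pixel offset (x, y). src_w, src_h, x and y must all
//be even, because U and V are subsampled 2:1 in both directions.
static void blit_yuv420p(uint8_t* const dst[3], const int dst_linesize[3],
                         const uint8_t* const src[3], const int src_linesize[3],
                         int src_w, int src_h, int x, int y)
{
    for (int i = 0; i < src_h; ++i)        //Y plane: full resolution
        memcpy(dst[0] + (y + i) * dst_linesize[0] + x,
               src[0] + i * src_linesize[0], src_w);
    for (int i = 0; i < src_h / 2; ++i) {  //U and V planes: half width, half height
        memcpy(dst[1] + (y / 2 + i) * dst_linesize[1] + x / 2,
               src[1] + i * src_linesize[1], src_w / 2);
        memcpy(dst[2] + (y / 2 + i) * dst_linesize[2] + x / 2,
               src[2] + i * src_linesize[2], src_w / 2);
    }
}

With such a helper, the overlay in main() becomes a single call like blit_yuv420p(pDstFrame->data, pDstFrame->linesize, pFrameYUV->data, pFrameYUV->linesize, FRAMEWITH, FRAMEHEIGTH, 80, 80); indexing by linesize instead of raw width also keeps it correct for frames whose rows are padded.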