This note builds on my previous one, mainly so I don't have to repeat the environment setup. If anything is unclear, work through "VS2008 + ffmpeg SDK 3.2 debugging tutorial01" first.
Step 1:
Download SDL-devel-1.2.15-VC.zip (I have also uploaded a copy to 115 cloud storage: http://115.com/file/an923mtd) and extract it.
Step 2:
Copy the include folder from the extracted SDL directory into the same directory as test.cpp, and rename that include folder to sdl.
Add the .h files inside sdl to the project as header files.
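With this layout the SDL headers are pulled in with a project-relative sdl prefix, exactly as the listing in step 4 does:
#include "sdl\SDL.h"
#include "sdl\SDL_thread.h"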
Step 3:
Copy SDL.lib and SDLmain.lib from the lib/x86 folder of the extracted SDL directory into the lib folder under the project directory.
In VS2008, go to Project Properties -> Linker -> Input and add SDL.lib and SDLmain.lib to Additional Dependencies.
Normally SDL.dll would also be copied into the Debug directory, but mine already contained it, so I skipped that step; if yours does not, just copy it in.
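If you prefer to keep the linker settings in the source file instead of the project-properties dialog, MSVC's #pragma comment directive does the same job (a small sketch; it assumes the lib folder above is visible on the library search path, e.g. added under Linker -> General -> Additional Library Directories):
// Ask the MSVC linker to pull in SDL; equivalent to listing the .lib files
// under Additional Dependencies in the project properties.
#pragma comment(lib, "SDL.lib")
#pragma comment(lib, "SDLmain.lib")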
Step 4: write the code and test it.
test.cpp:
#include <stdio.h>
#include <stdlib.h>
#ifdef __cplusplus
extern "C" {
#endif
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#ifdef __cplusplus
}
#endif
#include "sdl\SDL.h"
#include "sdl\SDL_thread.h"
#ifdef __MINGW32__
#undef main /* Prevents SDL from overriding main() */
#endif
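// SaveFrame is carried over from tutorial01: it writes one frame out as a PPM image.
// It is not actually called in this SDL playback example.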
void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame) {
FILE *pFile;
char szFilename[32];
int y;
// Open file
sprintf(szFilename, "frame%d.ppm", iFrame);
pFile=fopen(szFilename, "wb");
if(pFile==NULL)
return;
// Write header
fprintf(pFile, "P6\n%d %d\n255\n", width, height);
// Write pixel data
for(y=0; y<height; y++)
fwrite(pFrame->data[0]+y*pFrame->linesize[0], 1, width*3, pFile);
// Close file
fclose(pFile);
}
int main() {
AVFormatContext *pFormatCtx;
int i, videoStream;
AVCodecContext *pCodecCtx;
AVCodec *pCodec;
AVFrame *pFrame;
AVFrame *pFrameRGB;
AVPacket packet;
int frameFinished;
int numBytes;
uint8_t *buffer;
float aspect_ratio;
SDL_Overlay *bmp;
SDL_Surface *screen;
SDL_Rect rect;
SDL_Event event;
static struct SwsContext *img_convert_ctx;
char * filePath="test.mp4";
// Register all formats and codecs
av_register_all();
if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
exit(1);
}
// Open video file
if(av_open_input_file(&pFormatCtx, filePath, NULL, 0, NULL)!=0)
return -1; // Couldn't open file
// Retrieve stream information
if(av_find_stream_info(pFormatCtx)<0)
return -1; // Couldn't find stream information
// Dump information about file onto standard error
dump_format(pFormatCtx, 0, filePath, 0);
// Find the first video stream
videoStream=-1;
for(i=0; i<pFormatCtx->nb_streams; i++)
if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO) {
videoStream=i;
break;
}
if(videoStream==-1)
return -1; // Didn't find a video stream
// Get a pointer to the codec context for the video stream
pCodecCtx=pFormatCtx->streams[videoStream]->codec;
// Find the decoder for the video stream
pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
if(pCodec==NULL) {
fprintf(stderr, "Unsupported codec!\n");
return -1; // Codec not found
}
// Open codec
if(avcodec_open(pCodecCtx, pCodec)<0)
return -1; // Could not open codec
// Allocate video frame
pFrame=avcodec_alloc_frame();
// Allocate an AVFrame structure
pFrameRGB=avcodec_alloc_frame();
if(pFrameRGB==NULL)
return -1;
// Make a screen to put our video
#ifndef __DARWIN__
screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);
#else
screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 24, 0);
#endif
if(!screen) {
fprintf(stderr, "SDL: could not set video mode - exiting\n");
exit(1);
}
// Allocate a place to put our YUV image on that screen
bmp = SDL_CreateYUVOverlay(pCodecCtx->width,
pCodecCtx->height,
SDL_YV12_OVERLAY,
screen);
// Determine required buffer size and allocate buffer
numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
pCodecCtx->height);
buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
// Assign appropriate parts of buffer to image planes in pFrameRGB
// Note that pFrameRGB is an AVFrame, but AVFrame is a superset
// of AVPicture
avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
pCodecCtx->width, pCodecCtx->height);
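// Note: pFrameRGB and buffer are carried over from tutorial01; this example converts
// decoded frames directly into the YUV overlay below, so they are not used for display.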
// Read packets and display the decoded video frames
i=0;
while(av_read_frame(pFormatCtx, &packet)>=0) {
if(packet.stream_index==videoStream) {
// Decode video frame
avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,packet.data, packet.size);
if(frameFinished) {
SDL_LockYUVOverlay(bmp);
AVPicture pict;
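// SDL's YV12 overlay stores its planes in Y, V, U order, while FFmpeg's
// YUV420P planes are ordered Y, U, V, hence the swapped indices 1 and 2 below.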
pict.data[0] = bmp->pixels[0];
pict.data[1] = bmp->pixels[2];
pict.data[2] = bmp->pixels[1];
pict.linesize[0] = bmp->pitches[0];
pict.linesize[1] = bmp->pitches[2];
pict.linesize[2] = bmp->pitches[1];
// Set up the software scaler: convert from the stream's native pixel format to YUV420P for the overlay.
// (It is recreated every frame here for simplicity; creating it once before the loop would be more efficient.)
img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
// Convert the decoded frame into the overlay's YUV planes
sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize,0, pCodecCtx->height, pict.data, pict.linesize);
SDL_UnlockYUVOverlay(bmp);
rect.x = 0;
rect.y = 0;
rect.w = pCodecCtx->width;
rect.h = pCodecCtx->height;
SDL_DisplayYUVOverlay(bmp, &rect);
}
}
// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
SDL_PollEvent(&event);
switch(event.type) {
case SDL_QUIT:
SDL_Quit();
exit(0);
break;
default:
break;
}
}
// Free the RGB image
av_free(buffer);
av_free(pFrameRGB);
// Free the YUV frame
av_free(pFrame);
// Close the codec
avcodec_close(pCodecCtx);
// Close the video file
av_close_input_file(pFormatCtx);
printf("执行完毕\n");
system("pause");
return 0;
}
That's it for the code.
Since the frames are not timed, playback runs far too fast; the next goal is to regulate the playback speed and to get audio working.
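For reference, a crude stopgap until proper timing is in place is to sleep a fixed amount after each displayed frame (a minimal sketch, not the real fix; the 40 ms value simply assumes roughly 25 fps):
// Hypothetical stopgap: delay about 40 ms right after SDL_DisplayYUVOverlay(bmp, &rect),
// i.e. assume roughly 25 fps. Proper A/V synchronization will replace this later.
SDL_Delay(40);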