FFMpeg+SDL多线程播放器

大家在网上搜索FFmpeg+SDL应该就能找到很多基于FFmpeg+SDL的播放器,如:

雷霄骅的100行代码实现FFMpeg+SDL的视频播放器:http://blog.csdn.net/leixiaohua1020/article/details/8652605

网上现有的版本的基本流程都是解码出来一帧以后再播放一帧,这样虽然实现起来比较简单,但是可能会造成视频播放不流畅的状况。基于此点,我写了一个简单地将视频解码和播放分开到两个线程的代码。

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>

#ifdef __cplusplus
extern "C"
{
#endif
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include "libswresample/swresample.h"
#include "libavutil/samplefmt.h"
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
#include <SDL/SDL.h>
#include <SDL/SDL_thread.h>
#ifdef __cplusplus
};
#endif


/*
 * Aggregates all demux/decode state for one media file: the format
 * context, the video/audio codec contexts and decoders, the stream
 * indices and the source file name. Filled in by init_video_state().
 */
typedef struct VideoState
{
	AVFormatContext *pFormatCtx;   /* demuxer context for the opened file */
	AVCodecContext  *pVideoCtx;    /* video decoder context (stream->codec) */
	AVCodecContext  *pAudioCtx;    /* audio decoder context (stream->codec) */
	AVCodec         *pVideo;       /* video decoder matched by codec_id */
	int              VideoIndex;   /* index of video stream in pFormatCtx->streams */
	AVCodec         *pAudio;       /* audio decoder matched by codec_id */
	int              AudioIndex;   /* index of audio stream in pFormatCtx->streams */
	AVStream        *pVideoStream; /* convenience pointer to the video AVStream */
	AVStream        *pAudioStream; /* convenience pointer to the audio AVStream */
	char *FileName;                /* path of the media file to play */
}VideoState;

/*
 * One node of the singly linked frame queue. The decoder thread fills
 * a node's YUV overlay, marks VideoFinish, appends a fresh empty node
 * as `next`, and moves on; the display loop in main() consumes nodes
 * from the head. Access is serialized by the global `mutex`.
 */
typedef struct FrameBuff
{
	unsigned int Counter;          /* 1-based sequence number of this frame */
	SDL_Overlay *Bmp;              /* YV12 overlay holding the converted pixels */
	unsigned int height,width;     /* frame dimensions (codec width/height) */
	int VideoFinish;               /* 1 once the decoder has filled this node */
	//SDL_mutex *mutex;
	//SDL_cond *cond;
	struct FrameBuff *next;        /* next node; NULL only on the tail sentinel */
}FrameBuff;
/* Shared player state: the decoder thread writes the FrameBuff list,
 * the main thread reads it; `mutex` guards the list. */
VideoState *vs = NULL;   /* global demux/decode state */
SDL_Surface *screen;     /* SDL 1.2 display surface */
SDL_cond *cond;          /* NOTE(review): created but never waited on/signaled */
SDL_mutex *mutex ;       /* protects the FrameBuff linked list */
/*
 * Allocates and initializes *vs: opens the hard-coded input file,
 * probes its streams, locates the first video and audio stream and
 * looks up decoders for both. Exits the process on any failure.
 */
void init_video_state(VideoState **vs)
{
	int i;
	int videoindex = -1;
	int audioindex = -1;
	(*vs) = (VideoState *)malloc(sizeof(VideoState));
	if((*vs) == NULL)
	{
		printf("could not allocate VideoState\n");
		exit(1);
	}
	(*vs)->FileName   = "testdec.mp4";
	(*vs)->pFormatCtx = avformat_alloc_context();
	if((*vs)->pFormatCtx == NULL)
	{
		printf("could not allocate format context\n");
		exit(1);
	}
	AVFormatContext *p = (*vs)->pFormatCtx;
	/* avformat_open_input() frees and NULLs the context on failure and
	 * may replace it on success, so re-sync the struct member after. */
	if(avformat_open_input(&p,(*vs)->FileName,NULL,NULL) )
	{
		printf("could not open the file\n");   /* was "/n" — literal slash-n */
		exit(1);
	}
	(*vs)->pFormatCtx = p;
	if(avformat_find_stream_info((*vs)->pFormatCtx,NULL)<0)
	{
		printf("could not find the stream\n");
		exit(1);
	}
	/* Remember the LAST video/audio stream seen (matches original logic;
	 * files here are assumed to carry one of each). */
	for( i = 0;i<(*vs)->pFormatCtx->nb_streams;i++)
	{
		if((*vs)->pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
		{
			videoindex = i;
		}
		if((*vs)->pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO)
		{
			audioindex = i;
		}
	}
	if(videoindex==-1||audioindex==-1)
	{
		printf("could not find the audio stream or video stream\n");
		exit(1);
	}
	(*vs)->VideoIndex = videoindex;
	(*vs)->AudioIndex = audioindex;
	(*vs)->pVideoStream = (*vs)->pFormatCtx->streams[videoindex];
	(*vs)->pAudioStream = (*vs)->pFormatCtx->streams[audioindex];

	(*vs)->pVideoCtx    = (*vs)->pVideoStream->codec;
	(*vs)->pVideo       = avcodec_find_decoder((*vs)->pVideoCtx->codec_id);

	(*vs)->pAudioCtx    = (*vs)->pAudioStream->codec;
	(*vs)->pAudio       = avcodec_find_decoder((*vs)->pAudioCtx->codec_id);
	if(!((*vs)->pVideo)||!((*vs)->pAudio))
	{
		printf("could not find the video or audio decoder\n");
		exit(1);
	}
}

/*
 * Allocates one FrameBuff node and gives it a fresh YV12 overlay sized
 * to the current video codec dimensions. The node starts unfinished
 * (VideoFinish = 0) with no successor. Exits the process on failure.
 * Requires the globals `vs` and `screen` to be initialized first.
 */
void init_frame_buff(FrameBuff **head)
{
	(*head) = (FrameBuff *)malloc(sizeof(FrameBuff));
	if(*head == NULL)
	{
		printf("FrameBuff is fail to access\n");   /* was "/n" — literal slash-n */
		exit(1);
	}
	(*head)->Bmp = SDL_CreateYUVOverlay(vs->pVideoCtx->width,vs->pVideoCtx->height,SDL_YV12_OVERLAY,screen);
	if((*head)->Bmp == NULL)
	{
		printf("could not create YUV overlay - %s\n", SDL_GetError());
		exit(1);
	}
	(*head)->VideoFinish = 0;
	//(*head)->mutex = SDL_CreateMutex();
	//(*head)->cond  = SDL_CreateCond();
	(*head)->next = NULL;
}
/*
 * Decoder thread body: reads packets from the global VideoState,
 * decodes video frames, converts them to the overlay's YV12 layout
 * and appends each finished frame to the FrameBuff list starting at
 * `head`. The global `mutex` serializes list updates against the
 * display loop in main(). Runs until EOF, then releases the decode
 * resources and closes the video codec.
 */
void video_dec(FrameBuff *head)
{
	if(avcodec_open2(vs->pVideoCtx, vs->pVideo, NULL) < 0)
	{
		printf("video Could not open codec.\n");
		exit(1);
	}
	FrameBuff *rhead = head;
	int ret;          /* BUG FIX: was `unsigned int`, so `ret < 0` could never be true
	                   * and decode errors were silently ignored. */
	int got_picture;
	int index = 0;

	struct SwsContext *img_convert_ctx;
	AVFrame *pFrame, *pFrameYUV;
	AVPacket packet;
	av_init_packet(&packet);
	pFrame    = av_frame_alloc();
	pFrameYUV = av_frame_alloc();
	if(!pFrame || !pFrameYUV)
	{
		printf("could not allocate frames\n");
		exit(1);
	}

	/* SWS_BICUBIC replaces the magic number 4 (same value, named). */
	img_convert_ctx = sws_getContext(vs->pVideoCtx->width, vs->pVideoCtx->height, vs->pVideoCtx->pix_fmt,
			vs->pVideoCtx->width, vs->pVideoCtx->height, AV_PIX_FMT_YUV420P,
			SWS_BICUBIC, NULL, NULL, NULL);
	while(av_read_frame(vs->pFormatCtx, &packet) >= 0)
	{
		if(packet.stream_index == vs->VideoIndex)
		{
			ret = avcodec_decode_video2(vs->pVideoCtx, pFrame, &got_picture, &packet);
			if(ret < 0)
			{
				printf("Decode Error.\n");
				exit(1);
			}
			if(got_picture)
			{
				SDL_LockMutex(mutex);
				/* Pre-allocate the next (tail sentinel) node; the current
				 * node `rhead` receives this frame's pixels. */
				FrameBuff *temp = NULL;
				init_frame_buff(&temp);
				SDL_LockYUVOverlay(rhead->Bmp);
				/* SDL's YV12 plane order is Y,V,U while FFmpeg's YUV420P
				 * is Y,U,V — hence the swapped [1]/[2] indices. */
				pFrameYUV->data[0] = rhead->Bmp->pixels[0];
				pFrameYUV->data[1] = rhead->Bmp->pixels[2];
				pFrameYUV->data[2] = rhead->Bmp->pixels[1];
				pFrameYUV->linesize[0] = rhead->Bmp->pitches[0];
				pFrameYUV->linesize[1] = rhead->Bmp->pitches[2];
				pFrameYUV->linesize[2] = rhead->Bmp->pitches[1];
				sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0,
						vs->pVideoCtx->height, pFrameYUV->data, pFrameYUV->linesize);
				SDL_UnlockYUVOverlay(rhead->Bmp);
				index++;

				rhead->Counter = index;
				rhead->width = vs->pVideoCtx->width;
				rhead->height = vs->pVideoCtx->height;
				rhead->VideoFinish = 1;
				rhead->next = temp;
				rhead = temp;   /* the new tail stays VideoFinish==0, next==NULL */
				SDL_UnlockMutex(mutex);
				printf("%d frame\n",index);
			}
		}
		av_free_packet(&packet);
	}
	/* Release decode-side resources (previously leaked). */
	av_frame_free(&pFrame);
	av_frame_free(&pFrameYUV);
	sws_freeContext(img_convert_ctx);
	avcodec_close(vs->pVideoCtx);
	printf("video dec end\n");
}
void main()
{
	SDL_Event videoevent,audioevent;
	FrameBuff *fb=NULL;
	AudioBuff *ab=NULL;
	SDL_Rect rect;
	printf("SDL strat\n");
	av_register_all();
	init_video_state(&vs);

	cond = SDL_CreateCond();
	mutex= SDL_CreateMutex();
	if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER))
	{
	    printf( "Could not initialize SDL - %s\n", SDL_GetError());
	    exit(1);
	}
	screen = SDL_SetVideoMode(vs->pVideoCtx->width, vs->pVideoCtx->height, 32,SDL_SWSURFACE);
	init_frame_buff(&fb);
	init_audio_buff(&ab);
	
	//video_dec(fb);
	SDL_Thread *videothread = SDL_CreateThread(video_dec,fb);
	//SDL_Thread *audiothread = SDL_CreateThread(audio_dec,ab);
	//printf("SDL strat\n");
	
	
	if(!screen)
	{
	    printf("SDL: could not set video mode - exiting:%s\n",SDL_GetError());
	    exit(1);
	}
	rect.x = 0;
	rect.y = 0;
	rect.w = vs->pVideoCtx->width;
	rect.h = vs->pVideoCtx->height;

	SDL_WM_SetCaption("Simplest FFmpeg Player",NULL);

	for(;;)
	{
		//SDL_CondWait(cond,mutex);
		
		if(/*ab->AudioFinish&&*/fb->VideoFinish&&fb->next!=NULL)
		{
			FrameBuff *temp = NULL;			
			SDL_LockMutex(mutex);
			SDL_DisplayYUVOverlay(fb->Bmp, &rect);
			printf("%d prestation frame\n",fb->Counter);			
			temp = fb;
			fb = fb->next;
			SDL_UnlockMutex(mutex);
			free(temp);
			SDL_Delay(40);
		}
		
	}
	return;
}

上面代码在前人的基础上,将解码前的一些设置都写在了结构体VideoState中,并在init_video_state中初始化。FrameBuff结构体存放的是每帧视频解码后的数据,解码数据经sws_scale转化为YUV420P格式,存放在Bmp指向的YUV叠加层(overlay)中。大家可以很清楚地看到FrameBuff是一个链表,在解码的时候将解出来的数据存入FrameBuff,然后解下一帧的时候将解码数据存入FrameBuff的next指向的地址中去。在显示的时候,从链表的头部开始播放,播放完节点的数据后,再继续播放当前节点的下一个节点的数据。

由于上述代码中使用了SDL_CreateThread(video_dec,fb),也就意味着解码和播放是两个不同的线程。初看没什么问题,仔细一想,解码线程需要对链表进行写操作,而播放线程却需要对链表进行读操作,那么,当链表还未写入完成的时候,如果进行了读操作,会怎么样?肯定的,这样播放出来的视频是有问题的。所以为了保护链表的数据,在改变链表的时候,需要对链表上锁,保证同一时刻只有一个线程能访问此段数据,其他线程会被阻塞。

当然代码中还有不少问题,比如说,当前链表会随着解码变得越来越长,那么占用的内存也会越来越多。所以,应该限制一下链表的长度:当节点数超过N以后就先等待,等播放完一帧以后再继续解码(即一个有界的生产者-消费者队列)。

你可能感兴趣的:(FFMpeg)