在CSDN学习ffmpeg是绕不开雷神的,在跑雷神音频播放器代码时就发现,MP3文件有些是带有封面的,因为在读取MP3文件的时候,打印的log显示还有一个视频流,而在跑雷神视频播放器代码时也发现,测试视频也是有声音的,因为log也打印了音频流。所以笔者想把雷神的音频播放器代码和视频播放器代码一糅合不就可以播放音视频了吗?(虽然没有同步)
雷神的音频播放器
雷神的视频播放器
加上最近在学习C++,不如就用C++把两个代码糅合在一起。水平不高,单纯分享。工程一共4个文件:
player类的内部函数
#include "player.h"
using namespace player;
// Guard macro: print the expression name and bail out of the enclosing function
// with -1 when a pointer is NULL. The trailing ';' is intentional -- call sites
// in this file invoke ASSERT(x) without their own semicolon.
// fixed: parenthesize the macro argument so complex expressions expand safely.
#define ASSERT(c) do{if((c) == NULL){printf("%s is NULL\n",#c);return -1;}}while(0);
// Construct a player for the given media file.
// Makes a heap copy of the file name, initializes FFmpeg networking and
// creates the mutex that guards codec teardown. All thread flags start
// in the "closed" state and both stream indexes start at -1.
Player::Player(const char *file):
filename(NULL)
,isEventClose(true)
,pFormatCtx(NULL)
,pPacket(NULL)
,pCodec(NULL)          // fixed: was never initialized but is checked against NULL
,isClose(true)
,isUnpackClose(true)
,isAudioClose(true)
,isVideoClose(true)
,screenW(0)
,screenH(0)
,audioStreamIndex(-1)
,videoStreamIndex(-1)
,gotAudioPicture(-1)   // fixed: was uninitialized until unpackLoop ran
,gotVideoPicture(-1)
,pMutex(NULL)
,eventThread(NULL)
,unpackThread(NULL)
,audioPlayerThread(NULL)
,videoPlayerThread(NULL)
{
    // NOTE: the init list above is in member declaration order, which is the
    // order C++ actually initializes in (the original list was shuffled).
    if(file)
    {
        // strlen+1 covers the terminating NUL; strcpy suffices --
        // sprintf added nothing but overhead here.
        filename = (char *)malloc(strlen(file)+1);
        if(filename)
            strcpy(filename,file);
    }
    avformat_network_init();
    pMutex = SDL_CreateMutex();
}
// Release the resources owned directly by the Player object:
// the copied file name and the codec-teardown mutex.
Player::~Player()
{
    if(filename != NULL)
        free(filename);
    if(pMutex)
        SDL_DestroyMutex(pMutex);
}
// Print the player version string (see VERSION in player.h). Always returns 0.
int Player::versionShow(void)
{
    printf("%s", VERSION);
    return 0;
}
//初始化分3步
int Player::init(void)
{
//1.为格式化上下文分配空间
pFormatCtx = avformat_alloc_context();
ASSERT(pFormatCtx)
printf("filename:%s\n",filename);
//初始化SDL
if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER | SDL_INIT_EVENTS)) {
printf( "Could not initialize SDL - %s\n", SDL_GetError());
return -1;
}else
{
//2.读取文件到格式化上下文(上下文可以看作是缓存)
if(avformat_open_input(&pFormatCtx,filename,NULL,NULL)!=0){
printf("Couldn't open input stream.\n");
return -1;
}
//3.从格式化上下文中读取流
if(avformat_find_stream_info(pFormatCtx,NULL)<0){
printf("Couldn't find stream information.\n");
return -1;
}
//打印pFormatCtx信息
av_dump_format(pFormatCtx, 0, filename, false);
//创建事件线程。主要负责键盘输入,或者窗口事件,比如点击窗口红叉
eventThread = SDL_CreateThread(eventLoop,"event",(void *)this);
ASSERT(eventThread)
isEventClose = false;
//创建解包线程。主要负责从pFormatCtx里读取packet,然后发送给解码器,获取可以播放或者显示的帧
unpackThread = SDL_CreateThread(unpackLoop,"unpack",(void *)this);
ASSERT(unpackThread)
}
return 0;
}
int Player::exit(void)
{
isClose = true;
//确保其他线程都结束后,事件线程最后结束
while(!(isUnpackClose && isAudioClose && isVideoClose));
printf("SDL_Quit\n");
SDL_Quit();
if(pFormatCtx)
{
avformat_close_input(&pFormatCtx);
pFormatCtx = NULL;
}
isEventClose = true;
return 0;
}
//这里虽然写了处理键盘事件,但是没有完成
// Keyboard handler -- currently a stub. SDLK_SPACE is recognised but no
// action (e.g. pause/resume) has been implemented yet. Always returns 0.
int Player::keyEvent(int key)
{
    if (key == SDLK_SPACE)
    {
        // TODO: toggle pause/resume here
    }
    return 0;
}
//这里事件线程主要就处理了关闭窗口这个事件
//关闭窗口就结束播放,但是要确保资源都已经释放,所以事件线程最后退出
// Event-thread entry point. Dispatches SDL events; the only one that really
// matters is SDL_QUIT (window close), which triggers the orderly shutdown in
// Player::exit(). This thread leaves last: exit() flips isEventClose only
// after the other threads have finished releasing their resources.
int Player::eventLoop(void *pthis)
{
    ASSERT(pthis)
    SDL_Event event;
    Player *tmpthis = (Player *) pthis;
    tmpthis->isEventClose = false;
    tmpthis->isClose = false;
    // fixed: printing a pointer with %x is undefined behaviour -- use %p
    printf("Start eventLoop -- %p\n",(void *)tmpthis);
    while(!tmpthis->isEventClose)
    {
        SDL_WaitEvent(&event);
        switch (event.type)
        {
        case SDL_KEYDOWN: // key pressed
            tmpthis->keyEvent(event.key.keysym.sym);
            break;
        case SDL_QUIT:    // window closed -> full shutdown
            tmpthis->exit();
            break;
        default:
            break;
        }
    }
    printf("leave eventLoop\n");
    return 0;
}
//单个流对应的编解码器初始化(这里只有解码器),分为4步
int Player::codecContextInit(int streamIndex)
{
printf("Init %d\n",streamIndex);
//1.为解码器上下文分配空间
pCodec[streamIndex].pCodeCtx = avcodec_alloc_context3(NULL);
ASSERT(pCodec[streamIndex].pCodeCtx)
//2.由解码器参数转换为解码器上下文
avcodec_parameters_to_context(pCodec[streamIndex].pCodeCtx,pFormatCtx->streams[streamIndex]->codecpar);
pCodec[streamIndex].pCodeCtx->pkt_timebase = pFormatCtx->streams[streamIndex]->time_base;
//3.寻找当前上下文对应的解码器,如果找不到,可能是你的FFMPEG没有安装对于的编解码器动态库
pCodec[streamIndex].pAvCodec = avcodec_find_decoder(pCodec[streamIndex].pCodeCtx->codec_id);
ASSERT(pCodec[streamIndex].pAvCodec)
//4.打开对应的解码器,到这一步,已经可以从流里面解码了
if(avcodec_open2(pCodec[streamIndex].pCodeCtx, pCodec[streamIndex].pAvCodec,NULL)<0){
printf("Could not open codec.\n");
return -1;
}
//根据是音频流还是视频流决定如何初始化
switch (pFormatCtx->streams[streamIndex]->codecpar->codec_type)
{
case AVMEDIA_TYPE_VIDEO:
printf("AVMEDIA_TYPE_VIDEO\n");
videoStreamIndex = streamIndex;
//雷神demo里的视频格式是YUV,而MP3是RGB24,这里没有考虑其他情况,只针对这两个测试文件编写的代码
//所以其他文件播放可能有问题
if(pCodec[streamIndex].pCodeCtx->pix_fmt == AV_PIX_FMT_YUV420P)
{
//从解码器接收到的视频帧
pCodec[streamIndex].pFrame = av_frame_alloc();
ASSERT(pCodec[streamIndex].pFrame)
//转换后的视频帧
pCodec[streamIndex].videoOutFmt.pOutFrame = av_frame_alloc();
ASSERT(pCodec[streamIndex].videoOutFmt.pOutFrame)
//上面的alloc并没有分配实际内存,pOutFrame的内存其实是下面申请的内存填充而来
pCodec[streamIndex].pBuffer=(unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodec[streamIndex].pCodeCtx->width, pCodec[streamIndex].pCodeCtx->height,1));
ASSERT(pCodec[streamIndex].pBuffer)
//把上面申请的内存填充到pOutFrame中去,pFrame直接接收的是解码器输出的帧,所以内存不用自己分配
av_image_fill_arrays(pCodec[streamIndex].videoOutFmt.pOutFrame->data, pCodec[streamIndex].videoOutFmt.pOutFrame->linesize,pCodec[streamIndex].pBuffer, \
AV_PIX_FMT_YUV420P,pCodec[streamIndex].pCodeCtx->width, pCodec[streamIndex].pCodeCtx->height,1);
//视频转换上下文初始化,因为SDL并不是什么格式都能播放,所以首先从视频流里面解码出来的帧的格式是FFMPEG中的某个格式
//然后需要找到FFMPEG中这个格式对应到SDL中是什么格式,如果没有对应格式或者该格式在SDL中播放有问题,最简单的办法就是
//无论解码出来什么格式都转化为SDL支持比较好的格式
pCodec[streamIndex].pSwsConvertCtx = sws_getContext(pCodec[streamIndex].pCodeCtx->width, pCodec[streamIndex].pCodeCtx->height, pCodec[streamIndex].pCodeCtx->pix_fmt, \
pCodec[streamIndex].pCodeCtx->width, pCodec[streamIndex].pCodeCtx->height,AV_PIX_FMT_YUV420P , SWS_BICUBIC, NULL, NULL, NULL);
ASSERT(pCodec[streamIndex].pSwsConvertCtx)
}
else
{
//同上
pCodec[streamIndex].pFrame = av_frame_alloc();
ASSERT(pCodec[streamIndex].pFrame)
pCodec[streamIndex].videoOutFmt.pOutFrame = av_frame_alloc();
ASSERT(pCodec[streamIndex].videoOutFmt.pOutFrame)
pCodec[streamIndex].pBuffer=(unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_RGB24, pCodec[streamIndex].pCodeCtx->width, pCodec[streamIndex].pCodeCtx->height,1));
ASSERT(pCodec[streamIndex].pBuffer)
av_image_fill_arrays(pCodec[streamIndex].videoOutFmt.pOutFrame->data, pCodec[streamIndex].videoOutFmt.pOutFrame->linesize,pCodec[streamIndex].pBuffer, \
AV_PIX_FMT_RGB24,pCodec[streamIndex].pCodeCtx->width, pCodec[streamIndex].pCodeCtx->height,1);
pCodec[streamIndex].pSwsConvertCtx = sws_getContext(pCodec[streamIndex].pCodeCtx->width, pCodec[streamIndex].pCodeCtx->height, pCodec[streamIndex].pCodeCtx->pix_fmt, \
pCodec[streamIndex].pCodeCtx->width, pCodec[streamIndex].pCodeCtx->height,AV_PIX_FMT_RGB24 , SWS_BICUBIC, NULL, NULL, NULL);
ASSERT(pCodec[streamIndex].pSwsConvertCtx)
}
//把视频流中的视频画面宽、高保存,在视频线程中初始化窗口会用到
if(screenWwidth)
screenW = pCodec[streamIndex].pCodeCtx->width;
if(screenHheight)
screenH = pCodec[streamIndex].pCodeCtx->height;
break;
case AVMEDIA_TYPE_AUDIO:
printf("AVMEDIA_TYPE_AUDIO\n");
audioStreamIndex = streamIndex;
pCodec[streamIndex].pAudioOutFmt = (AudioOutFmt *)malloc(sizeof(AudioOutFmt));
ASSERT(pCodec[streamIndex].pAudioOutFmt)
pCodec[streamIndex].pAudioOutFmt->outChLayout = AV_CH_LAYOUT_STEREO;
pCodec[streamIndex].pAudioOutFmt->outNbSamples = pCodec[streamIndex].pCodeCtx->frame_size;
pCodec[streamIndex].pAudioOutFmt->outNbSampleFmt = AV_SAMPLE_FMT_S16;
pCodec[streamIndex].pAudioOutFmt->outSamplesRate = pCodec[streamIndex].pCodeCtx->sample_rate;
pCodec[streamIndex].pAudioOutFmt->outChannels = av_get_channel_layout_nb_channels(pCodec[streamIndex].pAudioOutFmt->outChLayout);
pCodec[streamIndex].pAudioOutFmt->outFrameSize = av_samples_get_buffer_size(NULL,pCodec[streamIndex].pAudioOutFmt->outChannels ,pCodec[streamIndex].pAudioOutFmt->outNbSamples,pCodec[streamIndex].pAudioOutFmt->outNbSampleFmt, 1);
pCodec[streamIndex].audioBufCtrl.audioLen = 0;
pCodec[streamIndex].audioBufCtrl.pAudioChunk = NULL;
pCodec[streamIndex].audioBufCtrl.pAudioPos = NULL;
pCodec[streamIndex].pBuffer = (unsigned char *)av_malloc(MAX_AUDIO_FRAME_SIZE*2);
ASSERT(pCodec[streamIndex].pBuffer)
pCodec[streamIndex].pFrame = av_frame_alloc();
ASSERT(pCodec[streamIndex].pFrame)
break;
default:
break;
}
return 0;
}
//退出时需要把所有资源释放
int Player::codecContextExit(int streamIndex)
{
if(pCodec != NULL)
{
if(pCodec[streamIndex].pCodeCtx != NULL)
{
//FFMPEG中的函数是否为线程安全并不清楚
SDL_LockMutex(pMutex);
avcodec_close(pCodec[streamIndex].pCodeCtx);
pCodec[streamIndex].pCodeCtx = NULL;
SDL_UnlockMutex(pMutex);
}
}
return 0;
}
int Player::unpackLoop(void *pthis)
{
ASSERT(pthis)
Player *tmpthis = (Player *) pthis;
tmpthis->isUnpackClose = false;
//每个流对应一个解码器
tmpthis->pCodec = (Codec *)malloc(sizeof(Codec)*(tmpthis->pFormatCtx->nb_streams));
ASSERT(tmpthis->pCodec)
//循环初始化所有流,本来软件设计思路是可以应对多个视频流或者音频流,但是实际并不可以,因为音视频流索引各保存了一个
//如果出现多个音频视频流,记录的流索引也只是最后那个
for(int i=0; i < tmpthis->pFormatCtx->nb_streams; i++)
{
if(tmpthis->codecContextInit(i)<0)
{
printf("Stream%d init failed\n",i);
return -1;
}
}
//创建音视频线程
tmpthis->audioPlayerThread = SDL_CreateThread(audioLoop,"audio",(void *)pthis);
ASSERT(tmpthis->audioPlayerThread)
tmpthis->isAudioClose = false;
tmpthis->videoPlayerThread = SDL_CreateThread(videoLoop,"video",(void *)pthis);
ASSERT(tmpthis->videoPlayerThread)
tmpthis->isVideoClose = false;
tmpthis->pPacket = (AVPacket *)av_malloc(sizeof(AVPacket));
ASSERT(tmpthis->pPacket)
tmpthis->gotAudioPicture = -1;
tmpthis->gotVideoPicture = -1;
while(!tmpthis->isClose)
{
//在音视频线程中,转换完一帧数据后会把此状态给设置为-1
if((tmpthis->gotAudioPicture<0) && (tmpthis->gotVideoPicture<0))
{
//1.从格式化上下文获取包数据
if(av_read_frame(tmpthis->pFormatCtx, tmpthis->pPacket) == 0)
{
//判断获取到的包数据是属于音频流还是视频流的
switch (tmpthis->pFormatCtx->streams[tmpthis->pPacket->stream_index]->codecpar->codec_type)
{
case AVMEDIA_TYPE_AUDIO:
tmpthis->audioStreamIndex = tmpthis->pPacket->stream_index;
//如果是音频流,就发送给之前初始化好的解码器
avcodec_send_packet(tmpthis->pCodec[tmpthis->audioStreamIndex].pCodeCtx, tmpthis->pPacket);
//发送后从解码器中接收帧,并不一定是一个包对应一帧数据,可能是发送多个包后才能接收到一帧
tmpthis->gotAudioPicture = avcodec_receive_frame(tmpthis->pCodec[tmpthis->audioStreamIndex].pCodeCtx,
tmpthis->pCodec[tmpthis->audioStreamIndex].pFrame);
break;
case AVMEDIA_TYPE_VIDEO:
//视频流同上
tmpthis->videoStreamIndex = tmpthis->pPacket->stream_index;
avcodec_send_packet(tmpthis->pCodec[tmpthis->videoStreamIndex].pCodeCtx, tmpthis->pPacket);
tmpthis->gotVideoPicture = avcodec_receive_frame(tmpthis->pCodec[tmpthis->videoStreamIndex].pCodeCtx,
tmpthis->pCodec[tmpthis->videoStreamIndex].pFrame);
break;
default:
break;
}
//必须要释放
av_packet_unref(tmpthis->pPacket);
}
}
}
printf("leave unpackLoop\n");
while(!(tmpthis->isVideoClose && tmpthis->isAudioClose));
if(tmpthis->pCodec)
{
free(tmpthis->pCodec);
tmpthis->pCodec = NULL;
}
tmpthis->codecContextExit(tmpthis->audioStreamIndex);
tmpthis->codecContextExit(tmpthis->videoStreamIndex);
tmpthis->isUnpackClose = true;
printf("unpackLoop free over\n");
return 0;
}
void Player::audioCallBack(void *pthis,unsigned char *stream,int len)
{
if(pthis == NULL)
return;
Player *tmpthis = (Player *)pthis;
//每次回调时必须先清空
SDL_memset(stream, 0, len);
//如果当前剩余播放为0则退出,这个变量在音频线程中被赋值
if(tmpthis->pCodec[tmpthis->audioStreamIndex].audioBufCtrl.audioLen == 0)
return;
//如果len大于audioLen,则播放audioLen
len=(len>tmpthis->pCodec[tmpthis->audioStreamIndex].audioBufCtrl.audioLen?tmpthis->pCodec[tmpthis->audioStreamIndex].audioBufCtrl.audioLen:len);
//复制len长度的音频数据到stream
memcpy(stream, tmpthis->pCodec[tmpthis->audioStreamIndex].audioBufCtrl.pAudioPos, len);
//当前播放指针后移len
tmpthis->pCodec[tmpthis->audioStreamIndex].audioBufCtrl.pAudioPos += len;
//剩余播放长度-len
tmpthis->pCodec[tmpthis->audioStreamIndex].audioBufCtrl.audioLen -= len;
}
int Player::audioLoop(void *pthis)
{
ASSERT(pthis)
Player *tmpthis = (Player *)pthis;
tmpthis->isAudioClose = false;
SDL_AudioSpec audioSpec;
int i = tmpthis->audioStreamIndex;
//把输出音频格式保存到audioSpec中,audioSpec为打开的音频设备参数
audioSpec.freq = tmpthis->pCodec[i].pAudioOutFmt->outSamplesRate;
audioSpec.format = AUDIO_S16SYS;
audioSpec.channels = tmpthis->pCodec[i].pAudioOutFmt->outChannels;
audioSpec.silence = 0;
audioSpec.samples = tmpthis->pCodec[i].pAudioOutFmt->outNbSamples;
audioSpec.callback = audioCallBack;
audioSpec.userdata = pthis;
//打开音频设备
if (SDL_OpenAudio(&audioSpec, NULL)<0){
printf("can't open audio.\n");
return -1;
}
//音频转化上下文申请内存
tmpthis->pCodec[i].pSwrConvertCtx = swr_alloc();
ASSERT(tmpthis->pCodec[i].pSwrConvertCtx)
//设置转换输入输出参数
tmpthis->pCodec[i].pSwrConvertCtx=swr_alloc_set_opts(tmpthis->pCodec[i].pSwrConvertCtx, \
tmpthis->pCodec[i].pAudioOutFmt->outChLayout, \
tmpthis->pCodec[i].pAudioOutFmt->outNbSampleFmt, \
tmpthis->pCodec[i].pAudioOutFmt->outSamplesRate, \
av_get_default_channel_layout(tmpthis->pCodec[i].pCodeCtx->channels), \
tmpthis->pCodec[i].pCodeCtx->sample_fmt , \
tmpthis->pCodec[i].pCodeCtx->sample_rate,0, NULL);
//音频转换初始化
swr_init(tmpthis->pCodec[i].pSwrConvertCtx);
//开始播放
SDL_PauseAudio(0);
while(!tmpthis->isClose)
{
//如果=0则代表在解包线程中,一帧音频数据被解码成功
if(tmpthis->gotAudioPicture == 0)
{
//开始转换
swr_convert(tmpthis->pCodec[tmpthis->audioStreamIndex].pSwrConvertCtx, \
&tmpthis->pCodec[tmpthis->audioStreamIndex].pBuffer, \
MAX_AUDIO_FRAME_SIZE, \
(const uint8_t **)tmpthis->pCodec[tmpthis->audioStreamIndex].pFrame->data , \
tmpthis->pCodec[tmpthis->audioStreamIndex].pFrame->nb_samples);
tmpthis->gotAudioPicture = -1;
//等待音频播放回调播放完当前音频数据
while(tmpthis->pCodec[tmpthis->audioStreamIndex].audioBufCtrl.audioLen>0)
SDL_Delay(1);
//新的音频数据赋值
tmpthis->pCodec[tmpthis->audioStreamIndex].audioBufCtrl.pAudioChunk = tmpthis->pCodec[tmpthis->audioStreamIndex].pBuffer;
tmpthis->pCodec[tmpthis->audioStreamIndex].audioBufCtrl.audioLen =tmpthis->pCodec[tmpthis->audioStreamIndex].pAudioOutFmt->outFrameSize;
tmpthis->pCodec[tmpthis->audioStreamIndex].audioBufCtrl.pAudioPos = tmpthis->pCodec[tmpthis->audioStreamIndex].audioBufCtrl.pAudioChunk;
}
}
printf("leave audioLoop\n");
av_frame_free(&tmpthis->pCodec[tmpthis->audioStreamIndex].pFrame);
if(tmpthis->pCodec[tmpthis->audioStreamIndex].pAudioOutFmt!=NULL)
{
free(tmpthis->pCodec[tmpthis->audioStreamIndex].pAudioOutFmt);
tmpthis->pCodec[tmpthis->audioStreamIndex].pAudioOutFmt = NULL;
}
swr_free(&tmpthis->pCodec[tmpthis->audioStreamIndex].pSwrConvertCtx);
tmpthis->pCodec[tmpthis->audioStreamIndex].pSwrConvertCtx = NULL;
SDL_CloseAudio();
av_free(tmpthis->pCodec[tmpthis->audioStreamIndex].pBuffer);
tmpthis->pCodec[tmpthis->audioStreamIndex].pBuffer = NULL;
tmpthis->isAudioClose = true;
printf("audioLoop free over\n");
return 0;
}
int Player::videoLoop(void *pthis)
{
ASSERT(pthis)
Player *tmpthis = (Player *)pthis;
tmpthis->isVideoClose = false;
SDL_Window *screen;
//创建窗口
screen = SDL_CreateWindow("Player",SDL_WINDOWPOS_UNDEFINED,SDL_WINDOWPOS_UNDEFINED,\
tmpthis->screenW,tmpthis->screenH,SDL_WINDOW_SHOWN);
if(!screen)
{
printf("Can't Create Window -- %s\n",SDL_GetError());
return -1;
}
//创建渲染器
SDL_Renderer* sdlRenderer = SDL_CreateRenderer(screen, -1, 0);
//清空渲染器
SDL_RenderClear(sdlRenderer);
//创建纹理,根据不同的格式创建不同的纹理,这个就是把FFMPEG中的格式转换为SDL中对应的格式
SDL_Texture* sdlTexture = NULL;
if(tmpthis->pCodec[tmpthis->videoStreamIndex].pCodeCtx->pix_fmt == AV_PIX_FMT_YUV420P)
sdlTexture = SDL_CreateTexture(sdlRenderer,SDL_PIXELFORMAT_IYUV,\
SDL_TEXTUREACCESS_STREAMING,tmpthis->screenW,tmpthis->screenH);//SDL_PIXELFORMAT_RGB24 /SDL_PIXELFORMAT_IYUV
else
sdlTexture = SDL_CreateTexture(sdlRenderer,SDL_PIXELFORMAT_RGB24,\
SDL_TEXTUREACCESS_STREAMING,tmpthis->screenW,tmpthis->screenH);//SDL_PIXELFORMAT_RGB24 /SDL_PIXELFORMAT_IYUV
//描述一个矩形
SDL_Rect sdlRect;
//矩形高
sdlRect.h = tmpthis->screenH;
//矩形宽
sdlRect.w = tmpthis->screenW;
while(!tmpthis->isClose)
{
if(tmpthis->gotVideoPicture == 0)
{
//转换视频格式
sws_scale(tmpthis->pCodec[tmpthis->videoStreamIndex].pSwsConvertCtx,
(const unsigned char* const*)tmpthis->pCodec[tmpthis->videoStreamIndex].pFrame->data,
tmpthis->pCodec[tmpthis->videoStreamIndex].pFrame->linesize,
0,
tmpthis->pCodec[tmpthis->videoStreamIndex].pCodeCtx->height,
tmpthis->pCodec[tmpthis->videoStreamIndex].videoOutFmt.pOutFrame->data,
tmpthis->pCodec[tmpthis->videoStreamIndex].videoOutFmt.pOutFrame->linesize);
tmpthis->gotVideoPicture = -1;
//设置纹理像素数据
SDL_UpdateTexture( sdlTexture, &sdlRect, \
tmpthis->pCodec[tmpthis->videoStreamIndex].videoOutFmt.pOutFrame->data[0], \
tmpthis->pCodec[tmpthis->videoStreamIndex].videoOutFmt.pOutFrame->linesize[0] ); \
SDL_RenderClear( sdlRenderer );
//纹理复制到渲染器
SDL_RenderCopy( sdlRenderer, sdlTexture, NULL, &sdlRect);
//显示
SDL_RenderPresent( sdlRenderer );
}
}
printf("leave videoLoop\n");
av_free(tmpthis->pCodec[tmpthis->videoStreamIndex].pBuffer);
tmpthis->pCodec[tmpthis->videoStreamIndex].pBuffer = NULL;
av_frame_free(&tmpthis->pCodec[tmpthis->videoStreamIndex].pFrame);
tmpthis->pCodec[tmpthis->videoStreamIndex].pFrame = NULL;
av_frame_free(&tmpthis->pCodec[tmpthis->videoStreamIndex].videoOutFmt.pOutFrame);
tmpthis->pCodec[tmpthis->videoStreamIndex].videoOutFmt.pOutFrame = NULL;
sws_freeContext(tmpthis->pCodec[tmpthis->videoStreamIndex].pSwsConvertCtx);
tmpthis->pCodec[tmpthis->videoStreamIndex].pSwsConvertCtx = NULL;
SDL_DestroyWindow(screen);
tmpthis->isVideoClose = true;
printf("videoLoop free over\n");
return 0;
}
player类的头文件player.h(player.cpp所包含的头文件)
#ifndef PLAYER_H
#define PLAYER_H
extern "C"
{
// NOTE(review): the original include names were stripped when the article was
// published (the HTML ate the <...> parts). These are the headers the
// implementation below actually uses -- confirm against the repository.
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswresample/swresample.h>
#include <libswscale/swscale.h>
#include <libavutil/imgutils.h>
#include <SDL2/SDL.h>
}
#include <stdio.h>
//版本号
#define VERSION "0.1v\n"
//音频单帧最大字节数
#define MAX_AUDIO_FRAME_SIZE 192000 // 1 second of 48khz 32bit audio
//播放器命名空间
namespace player
{
//控制音频播放结构体,主要传递音频数据到音频播放回调函数
//Audio playback control block: hands decoded audio data to the SDL audio callback
typedef struct AUDIOBUFCTRL
{
//start of the audio chunk currently being played
unsigned char *pAudioChunk;
//current read position inside the chunk
unsigned char *pAudioPos;
//bytes of audio remaining to play from pAudioPos
unsigned int audioLen;
}AudioBufCtrl;
//音频转换后输出格式
//Audio output format after resampling (what the SDL audio device is fed)
typedef struct AUDIOOUTFMT
{
//number of samples per channel in one output frame
int outNbSamples;
//output sample rate (Hz)
int outSamplesRate;
//number of output channels
int outChannels;
//size in bytes of one resampled output frame
int outFrameSize;
//output channel layout mask.
//fixed: AV_CH_LAYOUT_* values are 64-bit masks, but unsigned long is only
//32 bits on some platforms (e.g. Windows/LLP64) -- use uint64_t, which is
//also what swr_alloc_set_opts() expects
uint64_t outChLayout;
//output sample format
AVSampleFormat outNbSampleFmt;
}AudioOutFmt;
// 视频转换后输出格式
typedef struct VIDEOOUTFMT
{
//视频转换后的输出帧
AVFrame *pOutFrame;
}VideoOutFmt;
//音视频编解码器相关变量
typedef struct CODEC
{
//编解码器上下文
AVCodecContext *pCodeCtx;
//编解码器
const AVCodec *pAvCodec;
//单个音频/视频帧
AVFrame *pFrame;
//音频格式转换上下文
struct SwsContext *pSwsConvertCtx;
//视频格式转换上下文
struct SwrContext *pSwrConvertCtx;
//音视频数据缓存
unsigned char *pBuffer;
//音频转换输出格式
AudioOutFmt *pAudioOutFmt;
//音频播放控制结构体
AudioBufCtrl audioBufCtrl;
//视频转换输出格式(其实是转换输出缓存)
VideoOutFmt videoOutFmt;
}Codec;
//播放器类
class Player
{
public:
//输入文件名
char *filename;
//event线程结束标志
bool isEventClose;
//格式化上下文
AVFormatContext *pFormatCtx;
//数据包
AVPacket *pPacket;
//音视频编解码器
Codec *pCodec;
//构造函数
Player(const char *file);
//析构函数
~Player();
//打印版本
int versionShow(void);
//初始化
int init(void);
//退出初始化i
int exit(void);
//事件线程循环
static int eventLoop(void *pthis);
//解包线程循环
static int unpackLoop(void *pthis);
//音频播放循环
static int audioLoop(void *pthis);
//视频播放循环
static int videoLoop(void *pthis);
//音频播放回调
static void audioCallBack(void *pthis,unsigned char *stream,int len);
//编解码器初始化,每个流对应一个
int codecContextInit(int streamIndex);
//编解码器关闭
int codecContextExit(int streamIndex);
private:
//窗口是否关闭
bool isClose;
//解包线程是否结束
bool isUnpackClose;
//音频线程是否结束
bool isAudioClose;
//视频线程是否结束
bool isVideoClose;
//窗口宽度
int screenW;
//窗口高度
int screenH;
//音频流索引
int audioStreamIndex;
//视频流索引
int videoStreamIndex;
//获取音频帧状态
int gotAudioPicture;
//获取视频帧状态
int gotVideoPicture;
//互斥量
SDL_mutex *pMutex;
//事件线程指针
SDL_Thread *eventThread;
//解包线程指针
SDL_Thread *unpackThread;
//音频线程指针
SDL_Thread *audioPlayerThread;
//视频线程指针
SDL_Thread *videoPlayerThread;
//键盘事件处理
int keyEvent(int key);
};
} // namespace player
#endif
main函数所在文件
#include "player.h"
using namespace player;
int main(int argc,char *argv[])
{
if(argc!=2)
{
printf("参数异常\n");
return 0;
}
Player player(argv[1]);
player.versionShow();
player.init();
while(!player.isEventClose);
return 0;
}
编译脚本
#! /bin/sh
# Build the player: compile both translation units with debug info (-g) and
# link against SDL2 and the FFmpeg libraries installed under /usr/local/ffmpeg.
# (Note: the output is an executable despite the misleading ".a" name.)
g++ player.cpp player_main.cpp -g -o player.a -I /usr/local/ffmpeg/include -I ./ -L /usr/local/ffmpeg/lib \
-lSDL2main -lSDL2 -lavformat -lavcodec -lavutil -lswresample -lswscale -lm -lpthread
除了main函数,分为四个线程,事件线程、解包线程、音频线程、视频线程
类实例化,文件打开读取流,初始化等等
主要是处理了窗口关闭这个事件,窗口关闭后会关闭其他三个线程,等待其他线程资源释放完后再退出
主要是先初始化解码器,然后从音视频文件中取包,然后发送给对应的解码器,然后收帧,收到数据帧后置标志位
判断上一步的标志位,有可以转换的帧则转换,转换完成后复制到缓冲,等待音频播放回调播放
判断解包线程的标志位,同音频线程差不多,不过视频可以直接播放,不用回调
实际测试这段代码,会发现播放视频没有雷神的视频播放器流畅,主要是因为视频每帧的播放时间没有控制,而是解码转换好了就播放,而雷神代码中,粗略的对每帧延时40ms,也就是1秒播放25帧,相当于25FPS。但最好的解决办法是音视频同步。
[email protected]:Yi-Deng-Da-Shi/player.git
(ffmpeg)ffmpeg+SDL2的简单音视频播放器2