自制流媒体播放器(Qt+ffmpeg+SDL)小结

最近一边温习化学,一边又念念不忘自己那天捣鼓的软件。毕业多年,好多兴趣爱好真的只是有兴趣的时候突然才想起,曾经觉得枯燥无味的知识温习起来却又感觉特别有意思。人就是这么奇怪,年少无知的时候,大把青春不知道该怎么挥霍。当真正发现数理化,天文地理历史人文科技等都有意思的时候,时间再也不惯着你了。好可惜,不懂事时人年轻,懂事时候人已老。

FFmpeg,在我这里就是当作视频源编解码库使用;

SDL,封装DirectX的库,可直接调用电脑底层驱动联动硬件播放声音和渲染图像;

当然,由于平时维护Qt客户端,也顺手将Qt用上了。

main.cpp就比较简单

#include "FFMPEG_LEARN.h"
#include 

#undef main //由于SDL库自己有一个mian,为了避免冲突故添加#undef main
int main(int argc, char *argv[])
{
	QApplication a(argc, argv);
	FFMPEG_LEARN w;
	w.show();
	return a.exec();

}
//入口函数就这么简单,基本不做改动,FFMPEG_LEARN就是窗口了,这主要是Qt的用法

视频播放当然需要一个线程来对视频源进行解码转码,这就是自定义的解码线程类

#pragma once

#include 
#include 
#include 

extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavdevice/avdevice.h"  
#include "libavutil/imgutils.h"    
#include "libswresample/swresample.h"    
}

// Decoder thread: demuxes one media source with FFmpeg, decodes video into
// images and audio into PCM buffers, and hands both to consumers via Qt
// signals (loose coupling -- no direct references to the players).
class DecodecThread : public QThread
{
	Q_OBJECT

public:
	DecodecThread(QObject *parent);
	~DecodecThread();


	void UpdateFormatContext(const char* mieda_source);// switch source: stop the loop, release state, restart the thread
	void ChangeMediaSource(const char* mieda_source);// only stores a new source path (no restart)
	void ChangOutputPixelFormat(const AVPixelFormat pixelFormat);// select the converter's output pixel format
	void ChangOutChLayout(const uint64_t out_ch_layout);// select the output channel layout
	void ChangAVSampleFormat(const AVSampleFormat out_sample_fmt);// select the output sample format

	// Output sample rate; set in InitDecodecForAudio to the source's rate
	// (no rate conversion is performed).
	int GetAudioSampleRate()
	{
		return m_out_sample_rate;
	}

private:
	void InitDecodec();// one-time FFmpeg bootstrap (register, network init, alloc context)
	int InitDecodecForVideo();// prepare video decoder + pixel converter; 0 on success
	int InitDecodecForAudio();// prepare audio decoder + resampler; 0 on success
	void ReleaseDecodec();// free all FFmpeg objects created above
private:
	
	QString m_strMiedaSourcePath;// media source path or URL
	QString m_errInfo;// accumulated error description for the last failure

	AVFormatContext *pFormatCtx;// demuxer context for the open source

	AVPacket *packet;// one un-decoded packet read from the demuxer

	// indices of the chosen streams inside pFormatCtx->streams (-1 = none)
	int m_videoStream;// video stream index
	int m_audioStream;// audio stream index

	AVCodecContext *m_pVideoCodecCtx;// video decoder context
	AVCodecContext* m_pAudioCodecCtx;// audio decoder context

	AVCodec *m_pVideoCodec;// video decoder
	AVCodec* m_pAudioCodec;// audio decoder

	// output buffers filled by the converters
	uint8_t *m_pVideoOutBuffer;// converted video pixels
	uint8_t* m_pAudioOutBuffer;// resampled PCM

	// sizes of the output buffers above
	int m_video_out_buffer_size;
	int m_audio_out_buffer_size;

	AVFrame*  m_pOriginalVideoFrame;// decoded video frame (source pixel format)
	AVFrame* m_pOutputVideoFrame;// converted video frame (wraps m_pVideoOutBuffer)
	AVFrame* m_pOriginalAudioFrame;// decoded audio frame (source sample format)
	AVFrame* m_pOutputAudioFrame;// allocated but unused by the current pipeline
	struct SwsContext *m_img_convert_ctx;// pixel-format converter
	struct SwrContext *m_audio_convert_ctx;// audio resampler


	// frames per second of the source video (FPS)
	int m_videoFPS;

	// pixel format produced by the video converter
	enum AVPixelFormat m_OutputPixelFormat;

	// audio output parameters
	int m_nb_channels;// output channel count
	enum AVSampleFormat m_out_sample_fmt;// output sample format (16-bit PCM by default)
	int m_out_sample_rate;// output sample rate (copied from the source)
	uint64_t m_out_ch_layout;// output channel layout (stereo by default)
	int m_out_nb_samples;// output samples per channel per frame (0 = variable)
	int m_nInitRet;// 0 = ready; -1 stops/blocks the decode loop; >0 = init error code
	QMutex m_MutexStopCircleRunByInitRet;// guards m_nInitRet between controller and run()

	int m_nAudioInitRet;// result of InitDecodecForAudio (0 ok, -1 failed)
	int m_nVideoInitRet;// result of InitDecodecForVideo (0 ok, -1 failed)

	bool m_bLastDecodecWorkFinished;// true once run()'s loop has fully exited; waited on before release/restart
	QMutex m_MutexForDecodecFinished;// guards m_bLastDecodecWorkFinished
	virtual void run();
signals:
	void sig_DecodecOnePicture(QImage srcImage,const QString strID);// one converted video frame
	void sig_DecodecOneAudio(uint8_t* pAudioOutBuffer, int audio_out_buffer_size, const QString strID);// one resampled PCM chunk
	void sig_out_audio_param(int sample_rate,  int nb_channels,int samlpes);// audio device parameters for the player
};

从头文件基本能看个大概这个类是做什么的了吧,为了尽量降低程序耦合度,我用了些Qt的信号槽来传递参数,下面是cpp文件

#include "DecodecThread.h"
#include "log.h"//一个专门用于打印日志的头文件,程序员必须养成的日志习惯




#define MAX_AUDIO_FRAME_SIZE 192000 // 没仔细研究过为啥是192000,直接搬来用了哈


// Constructor: output formats are hard-coded for simplicity (RGB24 video,
// 16-bit stereo PCM audio); they could be taken as parameters later.
// All pointers are nulled so ReleaseDecodec() is safe even if decoding
// never starts.
DecodecThread::DecodecThread(QObject *parent)
	: QThread(parent)
{
	m_errInfo = "";
	m_strMiedaSourcePath = "";
	pFormatCtx = NULL;
	m_videoStream = -1;
	m_audioStream = -1;
	m_pVideoCodecCtx = NULL;
	m_pAudioCodecCtx = NULL;
	m_pVideoCodec = NULL;
	m_pAudioCodec = NULL;
	m_pOriginalVideoFrame = NULL;
	// BUGFIX: the original assigned m_pOutputAudioFrame twice and never
	// initialized m_pOriginalAudioFrame, leaving it indeterminate.
	m_pOriginalAudioFrame = NULL;
	m_pOutputVideoFrame = NULL;
	m_pOutputAudioFrame = NULL;
	m_pVideoOutBuffer = NULL;
	m_pAudioOutBuffer = NULL;
	m_img_convert_ctx = NULL;
	m_audio_convert_ctx = NULL;
	m_OutputPixelFormat = AV_PIX_FMT_RGB24;
	packet = NULL;// holds one un-decoded packet inside the decode loop
	m_out_sample_fmt = AV_SAMPLE_FMT_S16;// output sample format: 16-bit PCM
	m_out_ch_layout = AV_CH_LAYOUT_STEREO;// output channel layout: stereo
	m_nInitRet = -1;// 0 = ready; any other value stops/blocks the decode loop
	m_nAudioInitRet = -1;
	m_nVideoInitRet = -1;
	m_bLastDecodecWorkFinished = true;// nothing running yet
	InitDecodec();
}

// Destructor: ask the decode loop to stop, wait until it reports it has
// finished, then release all FFmpeg resources.
DecodecThread::~DecodecThread()
{
	// signal run() to exit its loop
	{
		QMutexLocker locker(&m_MutexStopCircleRunByInitRet);
		m_nInitRet = -1;// the loop breaks when it observes -1
	}
	// poll (100ms steps) until run() has set the finished flag
	while (1)
	{
		{
			QMutexLocker locker(&m_MutexForDecodecFinished);
			if (m_bLastDecodecWorkFinished == true)
			{
				break;
			}
		}
		msleep(100);
	}
	// free decoder/converter/buffer state
	ReleaseDecodec();
}
// Switch to a new media source: stop the current decode loop, wait for it
// to finish, release the old FFmpeg state, then restart the thread.
// NOTE(review): when mieda_source is empty this returns immediately after
// signalling the stop (used by the owner's destructor to shut decoding
// down without restarting) -- confirm that is the intended contract.
void DecodecThread::UpdateFormatContext(const char* mieda_source)
{
    
    // signal the decode loop to stop
	{
		QMutexLocker locker(&m_MutexStopCircleRunByInitRet);
	    m_nInitRet = -1;// run() breaks its loop on -1
	}

	QString tempStrSource = mieda_source;
	if (tempStrSource.isEmpty())
	{
		m_nInitRet = 1;
		m_errInfo = " mieda_source is empty";
		return;
	}
	m_strMiedaSourcePath = mieda_source;

	// poll (100ms steps) until the decode loop reports it has stopped
	while (1)
	{
		{
			QMutexLocker locker(&m_MutexForDecodecFinished);
			if (m_bLastDecodecWorkFinished == true)
			{
				break;
			}
		}
		msleep(100);
	}
	// release the previous source's resources
	ReleaseDecodec();
	// restart: QThread::start() invokes run() on the new source
	start();
}

// Store a new source path without restarting decoding (takes effect next start).
void DecodecThread::ChangeMediaSource(const char* mieda_source)
{
	m_strMiedaSourcePath = mieda_source;
}

// Select the pixel format the video converter will produce (default RGB24).
void DecodecThread::ChangOutputPixelFormat(const AVPixelFormat pixelFormat)
{
	m_OutputPixelFormat = pixelFormat;
}

// Select the output channel layout (default stereo).
void DecodecThread::ChangOutChLayout(const uint64_t out_ch_layout)
{
	m_out_ch_layout = out_ch_layout;
}

// Select the output sample format (default 16-bit signed PCM).
void DecodecThread::ChangAVSampleFormat(const AVSampleFormat out_sample_fmt)
{
	m_out_sample_fmt = out_sample_fmt;
}

// One-time FFmpeg bootstrap: register all codecs/muxers (required by this
// older FFmpeg API level before any decoding), enable network input, and
// allocate the demuxer context every later call operates on.
void DecodecThread::InitDecodec()
{
	// deprecated in FFmpeg 4+, but mandatory for the API level used here
	av_register_all();
	avformat_network_init();// enable network stream sources (rtmp/rtsp/http)
	// allocate the AVFormatContext through which all demuxing is done
	pFormatCtx = avformat_alloc_context();
}

// Prepare the video decoder and the pixel-format converter (source size
// and format -> same size in m_OutputPixelFormat).
// Returns 0 on success, -1 on failure (reason appended to m_errInfo).
int DecodecThread::InitDecodecForVideo()
{
	if (m_videoStream == -1)
	{
		m_errInfo.append("unknown videoStream Failed");
		return -1;
	}
	m_pVideoCodecCtx = pFormatCtx->streams[m_videoStream]->codec;// decoder context (deprecated stream->codec API)
	m_pVideoCodec = avcodec_find_decoder(m_pVideoCodecCtx->codec_id);
	if (m_pVideoCodec == NULL)
	{
		m_errInfo.append(",m_pVideoCodec is null");
		return -1;
	}
	// open the video decoder
	if (avcodec_open2(m_pVideoCodecCtx, m_pVideoCodec, NULL) < 0) {
		m_errInfo.append(",open video avcodec  Failed");
		return -1;
	}
	m_pOriginalVideoFrame = av_frame_alloc();// receives decoded frames
	m_pOutputVideoFrame = av_frame_alloc();// wraps the converted output buffer

	// set up the converter: same width/height, target pixel format
	m_img_convert_ctx = sws_getContext(m_pVideoCodecCtx->width, m_pVideoCodecCtx->height,
		m_pVideoCodecCtx->pix_fmt, m_pVideoCodecCtx->width, m_pVideoCodecCtx->height,
		m_OutputPixelFormat, SWS_BICUBIC, NULL, NULL, NULL);
	if (m_img_convert_ctx == NULL)
	{
		// BUGFIX: previously unchecked; sws_getContext can fail for invalid
		// dimensions/formats and sws_scale would then dereference NULL.
		m_errInfo.append(",sws_getContext Failed");
		return -1;
	}

	// bytes required for one converted frame
	int numBytes = avpicture_get_size(m_OutputPixelFormat, m_pVideoCodecCtx->width, m_pVideoCodecCtx->height);

	// allocate the converted-pixel buffer
	m_pVideoOutBuffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));
	if (m_pVideoOutBuffer == NULL)
	{
		// BUGFIX: previously unchecked allocation
		m_errInfo.append(",av_malloc video out buffer Failed");
		return -1;
	}

	// point m_pOutputVideoFrame's data planes into m_pVideoOutBuffer (no copy)
	avpicture_fill((AVPicture *)m_pOutputVideoFrame, m_pVideoOutBuffer, m_OutputPixelFormat, m_pVideoCodecCtx->width, m_pVideoCodecCtx->height);

	av_dump_format(pFormatCtx, 0, m_strMiedaSourcePath.toLatin1().data(), 0);// log source details
	writeLog("视频转码准备", "无", "无");
	return 0;
}

// Prepare the audio decoder and the resampler (source sample format ->
// m_out_sample_fmt / m_out_ch_layout at the source sample rate; no rate
// conversion). Returns 0 on success, -1 on failure.
int DecodecThread::InitDecodecForAudio()
{
	if (m_audioStream == -1)
	{
		m_errInfo.append("unknown audioStream Failed");
		return -1;
	}
	// look up a decoder for the stream's codec (deprecated stream->codec API)
	m_pAudioCodecCtx = pFormatCtx->streams[m_audioStream]->codec;
	m_pAudioCodec = avcodec_find_decoder(m_pAudioCodecCtx->codec_id);

	if (m_pAudioCodec == NULL)
	{
		m_errInfo.append(",m_pAudioCodec is null");
		return -1;
	}
	// open the audio decoder
	if (avcodec_open2(m_pAudioCodecCtx, m_pAudioCodec, NULL) < 0)
	{
		m_errInfo.append(",open audio avcodec Failed");
		return -1;
	}
	
	m_pOriginalAudioFrame = av_frame_alloc();
	m_pOutputAudioFrame = av_frame_alloc();

	//============================ resampler setup ============================//
	enum AVSampleFormat in_sample_fmt = m_pAudioCodecCtx->sample_fmt;// source sample format
	m_out_sample_rate = m_pAudioCodecCtx->sample_rate;// output rate = source rate (no resampling of the rate itself)
	uint64_t in_ch_layout = m_pAudioCodecCtx->channel_layout;// source channel layout
	m_audio_convert_ctx = swr_alloc();// allocate the resampler
	// argument order: out layout/fmt/rate, then in layout/fmt/rate (both rates equal here)
	swr_alloc_set_opts(m_audio_convert_ctx, m_out_ch_layout, m_out_sample_fmt, m_out_sample_rate, in_ch_layout, in_sample_fmt, m_out_sample_rate, 0, NULL);
	swr_init(m_audio_convert_ctx);
	m_nb_channels = av_get_channel_layout_nb_channels(m_out_ch_layout);// output channel count
	m_out_nb_samples = m_pAudioCodecCtx->frame_size;// 0 means the codec produces variable frame sizes (AV_CODEC_CAP_VARIABLE_FRAME_SIZE)

	if (m_out_nb_samples)
	{
		int SamplesSize = av_samples_get_buffer_size(NULL, m_nb_channels, m_out_nb_samples, m_out_sample_fmt, 1);
		//m_pAudioOutBuffer = (uint8_t *)av_malloc(SamplesSize);
		m_pAudioOutBuffer = (uint8_t *)av_malloc(MAX_AUDIO_FRAME_SIZE * 2);// generously sized fixed PCM buffer
		emit sig_out_audio_param(m_out_sample_rate, m_nb_channels, m_out_nb_samples);
	
		//writeLog("音频转码准备", "成功", QString("输出采样率:%1,输出采样率:%2").arg(QString::number(m_cx, 10), QString::number(m_cy, 10)).toStdString().c_str());
		writeLog("音频转码准备", "OK", "无");
	}
	else
	{
		// variable frame size: fall back to a fixed 1024-sample chunk
		m_pAudioOutBuffer = (uint8_t *)av_malloc(1024);
		emit sig_out_audio_param(m_out_sample_rate, m_nb_channels, 1024);
		writeLog("音频转码准备", "OK1024", "无");
	}
	//============================ resampler setup ============================//
	return 0;
}

void DecodecThread::ReleaseDecodec()
{

	//根据判断释放
	if (m_img_convert_ctx)
	{
		sws_freeContext(m_img_convert_ctx);
		m_img_convert_ctx = NULL;
	}

	if (m_audio_convert_ctx)
	{
		swr_free(&m_audio_convert_ctx);
	}

	if (m_pVideoOutBuffer)
	{
		av_free(m_pVideoOutBuffer);
		m_pVideoOutBuffer = NULL;

	}
	if (m_pAudioOutBuffer)
	{
		av_free(m_pAudioOutBuffer);
		m_pAudioOutBuffer = NULL;
	}

	if (m_pOutputVideoFrame)
	{
		av_free(m_pOutputVideoFrame);
		m_pOutputVideoFrame = NULL;
	}
	if (m_pOutputAudioFrame)
	{
		av_free(m_pOutputAudioFrame);
		m_pOutputAudioFrame = NULL;
	}
	
	if (m_pVideoCodecCtx)
	{
		avcodec_close(m_pVideoCodecCtx);
		m_pVideoCodecCtx = NULL;
	}
	if (m_pAudioCodecCtx)
	{
		avcodec_close(m_pAudioCodecCtx);
		m_pAudioCodecCtx = NULL;
	}
	

	if (pFormatCtx)
	{
		avformat_close_input(&pFormatCtx);
	}

}

void DecodecThread::run()
{

	QByteArray byteArry = m_strMiedaSourcePath.toLatin1();
	char* charSource = byteArry.data();

	if (!pFormatCtx)
	{
		pFormatCtx = avformat_alloc_context();
	}

	AVDictionary* opts = NULL;
	av_dict_set(&opts, "stimeout", "10000000", 0);// 该函数是微秒 意思是10秒后没拉取到流就代表超时
	//avformat_open_input()默认是阻塞的,用户可以通过设置“ic->flags |= AVFMT_FLAG_NONBLOCK; ”设置成非阻塞(通常是不推荐的);或者是设置timeout设置超时时间;或者是设置interrupt_callback定义返回机制。
	//打开视频文件,信息保存在pFormatCtx中
	if (avformat_open_input(&pFormatCtx, charSource, NULL, &opts) != 0) {
		m_nInitRet = 1;
		m_errInfo = " open media source Failed";
		return;
	}
	//根据打开的文件寻找其流信息
	if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
		m_nInitRet = 2;
		m_errInfo = "find stream info Failed";
		return;
	}
	//循环查找视频中包含的流信息,直到找到视频类型的流
	//便将其记录下来 保存到videoStream变量中
	m_videoStream = -1;
	m_audioStream = -1;

	for (int i = 0; i < pFormatCtx->nb_streams; i++) {
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			m_videoStream = i;
		}
		else if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
			m_audioStream = i;
		}

	}
	m_nVideoInitRet = InitDecodecForVideo();//初始化视频转码
	m_nAudioInitRet = InitDecodecForAudio();//初始化音频转码
	if (m_nVideoInitRet == -1 && m_nAudioInitRet == -1)
	{
		m_nInitRet = -1;
		return;
	}

	packet = av_packet_alloc();
	m_nInitRet = 0;


	{
		QMutexLocker locker(&m_MutexForDecodecFinished);
		m_bLastDecodecWorkFinished = false;
	}

	while (1)
	{

                msleep(15);//主要是防止解码音频过快一直占有信号,导致音频输出断片,处理比较粗暴 嘻嘻。。

                
       
		{
			QMutexLocker locker(&m_MutexStopCircleRunByInitRet);
				if (m_nInitRet == -1)
				{
					break;
				}
		}

		int ret = -1;
		if (av_read_frame(pFormatCtx, packet) < 0)
		{
			//break; 
			continue;
		}


		int got_picture = 0;
		int got_frame = 0;
		if (packet->stream_index == m_videoStream)
		{

			//视频里面的数据是经过编码压缩的,因此这里我们需要将其解码
			ret = avcodec_decode_video2(m_pVideoCodecCtx, m_pOriginalVideoFrame, &got_picture, packet);
			if (ret < 0) {
				return;
			}

			//基本上所有解码器解码之后得到的图像数据都是YUV420的格式,而这里我们需要将其保存成图片文件,因此需要将得到的YUV420数据转换成RGB格式,转换格式也是直接使用FFMPEG来完成:
			if (got_picture) {
				sws_scale(m_img_convert_ctx,//图片转码上下文 
					(uint8_t const * const *)m_pOriginalVideoFrame->data,//原始数据 
					m_pOriginalVideoFrame->linesize,//原始参数  
					0, //转码开始游标,一般为0  
					m_pVideoCodecCtx->height,//行数  
					m_pOutputVideoFrame->data,//转码后的数据  
					m_pOutputVideoFrame->linesize);
				QImage image(m_pOutputVideoFrame->data[0], m_pVideoCodecCtx->width, m_pVideoCodecCtx->height, QImage::Format_RGB888);
				QImage CopyImage = image.copy();
				emit sig_DecodecOnePicture(CopyImage, m_strMiedaSourcePath);

			}
		}
		else if (packet->stream_index == m_audioStream)
		{
			if (avcodec_decode_audio4(m_pAudioCodecCtx, m_pOriginalAudioFrame, &got_frame, packet) >= 0)
			{
				if (got_frame)
				{
					int auto_size = swr_get_out_samples(m_audio_convert_ctx, m_pOriginalAudioFrame->nb_samples);
					//音频格式转换(音频重采样)  
					swr_convert(m_audio_convert_ctx,//音频转换上下文  
						&m_pAudioOutBuffer,//输出缓存  
						(m_out_nb_samples ? m_out_nb_samples : 1024),//MAX_AUDIO_FRAME_SIZE,//(m_out_nb_samples? m_out_nb_samples:1024),//每次输出大小  //每个声道的大小
						(const uint8_t **)m_pOriginalAudioFrame->data,//输入数据  
						m_pOriginalAudioFrame->nb_samples);//输入  
					if (m_out_nb_samples)
					{
						  m_audio_out_buffer_size = av_samples_get_buffer_size(NULL, m_nb_channels, m_out_nb_samples, m_out_sample_fmt, 1);
						//  m_audio_out_buffer_size = av_samples_get_buffer_size(NULL, m_nb_channels, m_pOriginalAudioFrame->nb_samples, m_out_sample_fmt, 1);
						emit sig_DecodecOneAudio(m_pAudioOutBuffer, m_audio_out_buffer_size, m_strMiedaSourcePath);
					}
					else
					{
						emit sig_DecodecOneAudio(m_pAudioOutBuffer,1024, m_strMiedaSourcePath);
					}
				}
			}
			else
			{
				//有的时候应该考虑静音的时候
				memset(m_pAudioOutBuffer, 0, 1024);
				emit sig_DecodecOneAudio(m_pAudioOutBuffer, 1024, m_strMiedaSourcePath);
			}
     
		}
		av_free_packet(packet);
	}
	{
		QMutexLocker locker(&m_MutexForDecodecFinished);
		m_bLastDecodecWorkFinished = true;
	}
	
}

这里有必要对m_out_nb_samples说明一下,有时候我发现有的根据函数返回的是0,是因为他发现支持非固定采样数帧的音频源,我这里没多去处理,简单粗暴当做1024了,这显然是不科学的,但保证了我程序不会炸,试了下MP3总是前面部分(1分钟左右)播放不出来,不知道为啥,既然不是工作,不想弄先不弄555,架构搞清楚就好。

从上面可知道,ffmpeg解码转码出可以直接由底层播放或渲染的数据了。一般而言,音频视频还有一个同步需要处理,这里的信号槽可以是转给一个管理器(那自然就需要将dts和pts数据一并保存),也可以直接拿出去放了,一般网络串流就直接放了。

我这里音频播放处理也是继承了一个线程类在使用.h 和.cpp文件如下

#pragma once

#include 
#include 
#include 
#include 
#include
#include

extern "C"
{
#include 
#include 
#include 
#include 
#include 
#include 
}

using namespace std;

#define SDL_AUDIO_BUFFER_SIZE 1024
#define AVCODEC_MAX_AUDIO_FRAME_SIZE 192000




// Plays PCM chunks through SDL audio. Decoded chunks arrive through
// slot_GetOneAudioBuffer and are queued; SDL pulls them back out in
// audio_callback (or audio_callback_ex fed by run() in the threaded mode).
class AudioPlayer: public QThread
{
	Q_OBJECT
public:
	AudioPlayer(QObject *parent);
	~AudioPlayer();

public:
	// SDL pull callbacks: SDL calls these to fetch `len` bytes of PCM
	static void audio_callback(void *userdata, Uint8 *stream, int len);
	static void audio_callback_ex(void *userdata, Uint8 *stream, int len);
	public slots:
	// open the SDL audio device with the decoder's output parameters
	void slot_InitSDL_Audio(int SampleRate, int Channels,int Samples);
	// enqueue one decoded PCM chunk (copied into a length-prefixed buffer)
	void slot_GetOneAudioBuffer(uint8_t* pAudioOutBuffer, int audio_out_buffer_size, const QString strID);
	
protected:
	void run();
public:
	
 static Uint8 m_sAudio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];// staging buffer for audio_callback_ex
 static unsigned int m_sAudio_buf_size;// bytes in the staging buffer not yet consumed by SDL
 static Uint8*  m_sAudio_buf_index;// SDL's current read position in the staging buffer
 static QMutex m_MutexOpBuffer;
 static QMutex m_MutexOpQue;// guards m_DecodecBufferQue
 // BUGFIX: the element type was lost in formatting; each entry is a
 // length-prefixed heap buffer (4-byte size header + PCM payload).
 static QQueue<unsigned char*>  m_DecodecBufferQue;
 QMutex m_MutexThreadRun;
 bool bThreadRun;

 QMutex m_MutexThreadSafelyOut;
 bool bThreadSafelyOut;
};






#include "AudioPlayer.h"



// Definitions of the static members shared by the SDL audio callbacks.
Uint8 AudioPlayer::m_sAudio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2] = { 0 };
unsigned int AudioPlayer::m_sAudio_buf_size = 0;
Uint8*  AudioPlayer::m_sAudio_buf_index = m_sAudio_buf;
QMutex AudioPlayer::m_MutexOpBuffer;
QMutex AudioPlayer::m_MutexOpQue;
// BUGFIX: element type was lost in formatting; usage dequeues unsigned char*.
QQueue<unsigned char*>  AudioPlayer::m_DecodecBufferQue;


// Construct in the "running / safely stopped" state; run() flips
// bThreadSafelyOut to false once it actually starts executing.
AudioPlayer::AudioPlayer(QObject *parent)
	:QThread(parent)
	, bThreadRun(true)
	, bThreadSafelyOut(true)
{
}


// Destructor: stop the worker thread, then shut SDL down, then free any
// queued chunks.
AudioPlayer::~AudioPlayer()
{
	// 1) ask the worker thread to stop
	{
		QMutexLocker locker(&m_MutexThreadRun);
		bThreadRun = false;
	}
	// 2) wait until run() reports it has exited
	while (1)
	{
		{
			QMutexLocker locker(&m_MutexThreadSafelyOut);
			if (bThreadSafelyOut == true)
			{
				break;
			}
		}
		msleep(100);
	}
	// 3) BUGFIX: SDL_Quit used to run FIRST, killing the audio callback while
	// run() could still be spinning on the shared buffer. Quit only after
	// our thread has stopped; this also stops the SDL callback thread.
	SDL_Quit();
	// 4) drain and free the remaining length-prefixed chunks
	QMutexLocker queueLocker(&m_MutexOpQue);
	for (; m_DecodecBufferQue.count() > 0;)
	{
		unsigned char* c = m_DecodecBufferQue.dequeue();
		delete[] c;
	}
}


// Slot: open the SDL audio device with the decoder's output parameters and
// start playback. Invoked via sig_out_audio_param once decoding is set up.
void AudioPlayer::slot_InitSDL_Audio(int SampleRate, int Channels, int Samples)
{
	// BUGFIX: SDL_LockAudio was previously called before SDL_Init; the audio
	// subsystem must be initialized first.
	if (SDL_Init(SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
		printf("Could not initialize SDL - %s\n", SDL_GetError());
		return;
	}
	SDL_LockAudio();

	SDL_AudioSpec spec;
	SDL_AudioSpec wanted_spec;
	wanted_spec.freq = SampleRate;// output sample rate reported by the decoder
	wanted_spec.format = AUDIO_S16SYS;// signed 16-bit samples in native byte order (matches the decoder's S16 output)
	wanted_spec.channels = Channels;// channel count
	wanted_spec.silence = 0;// value representing silence (0 for signed formats)
	wanted_spec.samples = Samples;// callback granularity in samples
	wanted_spec.callback = audio_callback;// single-thread variant
	//wanted_spec.callback = audio_callback_ex;// variant fed by run()
	//wanted_spec.userdata = ...// optional user data for the callback
	if (SDL_OpenAudio(&wanted_spec, &spec) < 0)
	{
		// BUGFIX: the early return used to leave the audio device locked
		SDL_UnlockAudio();
		return;
	}

	SDL_UnlockAudio();
	SDL_PauseAudio(0);// start pulling data via the callback
}


// Slot: copy one decoded PCM chunk into a length-prefixed heap buffer and
// queue it for the SDL callback.
// Chunk layout: bytes 0-3 = payload length, bytes 4.. = PCM payload
// (the prefix exists because chunk sizes can vary between codecs).
void AudioPlayer::slot_GetOneAudioBuffer(uint8_t* pAudioOutBuffer, int audio_out_buffer_size, const QString strID)
{
	unsigned char* chunk = new unsigned char[4 + audio_out_buffer_size];
	// write the 4-byte length header
	memcpy(chunk, &audio_out_buffer_size, 4);
	// then the PCM payload starting at byte 4
	memcpy(chunk + 4, pAudioOutBuffer, audio_out_buffer_size);
	QMutexLocker queueGuard(&m_MutexOpQue);
	m_DecodecBufferQue.enqueue(chunk);
}



// SDL pull callback: fill `stream` with exactly `len` bytes of PCM.
// Refills a static staging buffer from the shared chunk queue as needed;
// outputs silence while the queue is empty.
void  AudioPlayer::audio_callback(void *userdata, Uint8 *stream, int len)
{
	static const int Max_audio_data_size = (AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2;
	static uint8_t audio_buf[Max_audio_data_size];// staging buffer
	static unsigned int audio_buf_size = 0;// valid bytes currently staged
	static unsigned int audio_buf_index = 0;// read position inside the staging buffer
	int len1;// bytes copied per iteration

	SDL_memset(stream, 0, len);
	// loop until SDL's request is fully satisfied
	while (len > 0)
	{
		// staging buffer exhausted -> refill from the queue
		if (audio_buf_index >= audio_buf_size)
		{
			audio_buf_size = 0;
			QMutexLocker locker(&m_MutexOpQue);
			if (m_DecodecBufferQue.count() > 0)
			{
				while (m_DecodecBufferQue.count() > 0)
				{
					// BUGFIX: peek the size from the head BEFORE dequeuing.
					// The old code dequeued first and, when the chunk did not
					// fit, broke out without freeing or re-queuing it (leak +
					// lost audio). Now an oversized chunk simply stays queued
					// for the next callback.
					int nBuffSize = *((int*)m_DecodecBufferQue.head());
					if (audio_buf_size + nBuffSize >= Max_audio_data_size)
					{
						break;// no room: leave the chunk for next time
					}
					unsigned char* c = m_DecodecBufferQue.dequeue();
					memcpy(audio_buf + audio_buf_size, c + 4, nBuffSize);// skip the 4-byte length header
					audio_buf_size += nBuffSize;
					delete[] c;
				}
			}
			else
			{
				// queue empty: stage a short block of silence
				audio_buf_size = 1024;
				memset(audio_buf, 0, audio_buf_size);
			}
			audio_buf_index = 0;
		}
		// copy as much as the remaining request allows; leftovers wait for
		// the next iteration / callback
		len1 = audio_buf_size - audio_buf_index;
		if (len1 > len) {
			len1 = len;
		}
		SDL_MixAudio(stream, (uint8_t *)audio_buf + audio_buf_index, len1, SDL_MIX_MAXVOLUME);
		len -= len1;
		stream += len1;
		audio_buf_index += len1;
	}
}
// Alternative SDL callback for the mode where run() fills the static
// staging buffer and this callback only drains it.
// NOTE(review): reads/writes m_sAudio_buf_size and m_sAudio_buf_index
// without taking m_MutexOpBuffer while AudioPlayer::run() writes them too
// -- this looks racy; confirm whether the mutex was meant to be used here.
void AudioPlayer::audio_callback_ex(void *userdata, Uint8 *stream, int len)
{
	// SDL2 requires clearing the stream buffer before writing/mixing
	SDL_memset(stream, 0, len);
	if (m_sAudio_buf_size == 0)
	{
		return;// nothing staged yet -> leave silence
	}
	len = (len > m_sAudio_buf_size ? m_sAudio_buf_size : len);// never read past the staged bytes

	//SDL_MixAudio(stream, m_sAudio_buf_index, len, SDL_MIX_MAXVOLUME);
	memcpy(stream, m_sAudio_buf_index, len);// raw copy (no volume mixing)
	m_sAudio_buf_index += len;// advance the read cursor
	m_sAudio_buf_size -= len;// run() refills once this reaches 0
	//SDL_Delay(1000);
}

// Worker thread for the audio_callback_ex mode: once the callback has
// drained the static staging buffer, batch up to 50 queued chunks into it.
void AudioPlayer::run()
{
	{
		QMutexLocker locker(&m_MutexThreadSafelyOut);
		bThreadSafelyOut = false;
	}

	while (1)
	{
		// BUGFIX: the old code waited on m_sAudio_buf_size while HOLDING
		// m_MutexThreadRun, so the destructor could deadlock trying to set
		// bThreadRun = false. Check the flag and release the lock before
		// waiting.
		{
			QMutexLocker locker(&m_MutexThreadRun);
			if (!bThreadRun)
			{
				break;
			}
		}
		// wait until the SDL callback has consumed the previous batch
		if (m_sAudio_buf_size > 0)
		{
			SDL_Delay(1);
			continue;
		}
		// batch 50 chunks at a time (heuristic kept from the original test code)
		if (m_DecodecBufferQue.count() > 50)
		{
			QMutexLocker locker(&m_MutexOpQue);// one lock for the whole batch
			for (int i = 0; i < 50 && m_DecodecBufferQue.count() > 0; ++i)
			{
				unsigned char* c = m_DecodecBufferQue.dequeue();
				int nBuffSize = *((int*)c);
				// BUGFIX: guard against overflowing the static staging buffer
				// (the old memcpy was unchecked). An overflowing chunk is
				// dropped -- audible glitch, but no memory corruption.
				if (m_sAudio_buf_size + nBuffSize > sizeof(m_sAudio_buf))
				{
					delete[] c;
					break;
				}
				memcpy(m_sAudio_buf + m_sAudio_buf_size, c + 4, nBuffSize);
				m_sAudio_buf_size += nBuffSize;
				delete[] c;
			}
			m_sAudio_buf_index = m_sAudio_buf;// reset the callback's read cursor
		}
		else
		{
			SDL_Delay(1);// avoid a busy spin while waiting for more chunks
		}
	}
	{
		QMutexLocker locker(&m_MutexThreadSafelyOut);
		bThreadSafelyOut = true;
	}
}

以上零零散散的组件,相关信号槽关系,相关搭配就由这个显示窗口顺带承包了吧,相当于一个组织者O(∩_∩)O哈哈~

咦?我的cpp文件呢,等我找到了补上

#pragma once

#include 
#include 
#include 
#include "ui_FFMPEG_LEARN.h"
#include 
#include 
#include "DecodecThread.h"
#include "videodownloaddialog.h"
#include "AudioPlayer.h"


#include
using namespace std;


// Main window: hosts the video canvas, owns the decoder thread and the
// audio player, and wires their signals together.
class FFMPEG_LEARN : public QMainWindow
{
	Q_OBJECT

public:
	FFMPEG_LEARN(QWidget *parent = Q_NULLPTR);
	~FFMPEG_LEARN();

private:
	Ui::FFMPEG_LEARNClass ui;

	QTimer m_timer;// repaint tick (30ms interval -> ~33 fps display)
	QWidget* m_CentralWidget;
	QLabel* m_CentralCanvas;// canvas the frames are painted onto
	// BUGFIX: element type was lost in formatting; AddImage/GetOutImage
	// enqueue and dequeue QImage values.
	QQueue<QImage> m_ImagesQueue;
	QMutex m_mutext;// guards m_ImagesQueue (filled by decoder signal, drained by timer)
	DecodecThread* m_decodecThread;
	videodownloaddialog* m_SourceChooseDialog;
	AudioPlayer* m_AudioPlayer;
private:
	void SetCentralImage(QImage image);// paint one frame onto the canvas
	void InitUiAndControl();// build menu actions and connections
	void AddImage(QImage image);// enqueue a deep copy of a frame
	void GetOutImage(QImage &tempImage);// dequeue the next frame, if any

	// 1 = enqueue, 2 = dequeue into `image`, 3 = clear the queue
	void OperateImageQueue(int SaveOrGetOut, QImage& image);
	public slots:
	void slot_ShowFlag();// timer tick: display the next buffered frame
	void slot_GotOneDecodecPicture(QImage srcImage, const QString strID);// decoder frame arrived
	void start();// START menu action: run the repaint timer
	void stop();// STOP menu action: freeze the display
	void slot_UpdatMediaSource();// apply the path chosen in the dialog

};


#include "FFMPEG_LEARN.h"
#include 
#include 


// Build the UI, create the decoder thread and audio player, and connect
// the decoder's signals (frames -> this window, PCM -> AudioPlayer).
FFMPEG_LEARN::FFMPEG_LEARN(QWidget *parent)
	: QMainWindow(parent)
{
	ui.setupUi(this);
	m_SourceChooseDialog = new videodownloaddialog(this);
	m_CentralCanvas = new QLabel(this);
	m_CentralWidget = new QWidget(this);
	m_CentralWidget->setStyleSheet("background:blue");
	this->setCentralWidget(m_CentralWidget);
	// BUGFIX: the layout was parented to `this` (a QMainWindow, which already
	// owns a layout -> Qt runtime warning). Parenting it to the central
	// widget installs it there directly; the explicit setLayout is redundant.
	QHBoxLayout* lay = new QHBoxLayout(m_CentralWidget);
	lay->addWidget(m_CentralCanvas);
	InitUiAndControl();
	m_timer.setInterval(30);// repaint roughly every 30ms
	m_decodecThread = new DecodecThread(this);
	QObject::connect(&m_timer, SIGNAL(timeout()), this, SLOT(slot_ShowFlag()));
	QObject::connect(m_decodecThread, SIGNAL(sig_DecodecOnePicture(QImage, const QString )), this, SLOT(slot_GotOneDecodecPicture(QImage, const QString)));

	m_AudioPlayer = new AudioPlayer(this);
	// NOTE(review): this queued connection carries a raw buffer pointer
	// across threads; the receiving slot copies the payload immediately,
	// which is what keeps it (mostly) safe -- confirm no reuse races.
	QObject::connect(m_decodecThread, SIGNAL(sig_DecodecOneAudio(uint8_t* , int , const QString)), m_AudioPlayer, SLOT(slot_GetOneAudioBuffer(uint8_t*,int, const QString )));
	QObject::connect(m_decodecThread, SIGNAL(sig_out_audio_param(int,int,int)), m_AudioPlayer, SLOT(slot_InitSDL_Audio(int,int,int)));
}

// Destructor: an empty source makes UpdateFormatContext signal the decode
// loop to stop and return without restarting -- used here as a shutdown.
FFMPEG_LEARN::~FFMPEG_LEARN()
{
	m_decodecThread->UpdateFormatContext("");
}

// Scale the frame to the canvas size (aspect ratio ignored) and display it.
void FFMPEG_LEARN::SetCentralImage(QImage image)
{
	const QSize canvasSize = m_CentralCanvas->size();
	QImage scaled = image.scaled(canvasSize, Qt::IgnoreAspectRatio);
	m_CentralCanvas->setPixmap(QPixmap::fromImage(scaled));
}

void FFMPEG_LEARN::InitUiAndControl()
{
	QAction* start = new QAction("START", this);
	QAction* stop = new QAction("STOP", this);
	QAction* chooseSource = new QAction("ChooseSource", this);
	this->menuBar()->addAction(start);
	this->menuBar()->addAction(stop);
	this->menuBar()->addAction(chooseSource);
	connect(start, SIGNAL(triggered()), this, SLOT(start()));
	connect(stop, SIGNAL(triggered()), this, SLOT(stop()));
	connect(chooseSource, SIGNAL(triggered()), m_SourceChooseDialog, SLOT(show()));
	connect(m_SourceChooseDialog, SIGNAL(sign_DownLoad()), this, SLOT(slot_UpdatMediaSource()));
}

// Deep-copy the incoming frame and append it to the display queue
// (skips null images).
void FFMPEG_LEARN::AddImage(QImage image)
{
	QImage deepCopy;
	deepCopy = image.copy();
	if (deepCopy.isNull())
	{
		return;
	}
	m_ImagesQueue.enqueue(deepCopy);
}

// Pop the next buffered frame (if any) into `tempImage` as a deep copy;
// leaves `tempImage` untouched when the queue is empty.
void FFMPEG_LEARN::GetOutImage(QImage& tempImage)
{
	if (m_ImagesQueue.count() == 0)
	{
		return;
	}
	QImage head = m_ImagesQueue.dequeue();
	if (!head.isNull())
	{
		tempImage = head.copy();
	}
}

// Serialized access to the image queue (decoder thread fills it, the GUI
// timer drains it). Operations: 1 = enqueue `image`, 2 = dequeue into
// `image`, 3 = clear; any other code is a no-op.
void FFMPEG_LEARN::OperateImageQueue(int SaveOrGetOut, QImage& image)
{
	QMutexLocker locker(&m_mutext);
	if (SaveOrGetOut == 1)
	{
		AddImage(image);
	}
	else if (SaveOrGetOut == 2)
	{
		GetOutImage(image);
	}
	else if (SaveOrGetOut == 3)
	{
		if (m_ImagesQueue.count())
		{
			m_ImagesQueue.clear();
		}
	}
}

void FFMPEG_LEARN::slot_ShowFlag()
{
	QImage image;
	OperateImageQueue(2, image);
	if (!image.isNull())
	{
		SetCentralImage(image);
	}
		
}

// Slot fed by the decoder thread: buffer the frame, but drop it when the
// queue already holds too many (bounds RGB memory growth: decode is much
// faster than the ~33ms display tick).
void FFMPEG_LEARN::slot_GotOneDecodecPicture(QImage srcImage, const QString strID)
{
	const bool queueFull = (m_ImagesQueue.count() > 4000);
	if (!queueFull)
	{
		OperateImageQueue(1, srcImage);
	}
}

// START action: (re)starts only the repaint timer; the decoder thread is
// started separately through UpdateFormatContext.
void FFMPEG_LEARN::start()
{
//	m_decodecThread->UpdateFormatContext("C:/Users/hewei/Documents/Visual Studio 2015/Projects/FFMPEG_LEARN/Miedia/dota.avi");
//	m_decodecThread->start();
	m_timer.start();
}

// STOP action: freeze the display by stopping the repaint timer
// (decoding keeps running and buffering in the background).
void FFMPEG_LEARN::stop()
{
	
	m_timer.stop();
}

// Apply the path chosen in the dialog: pause painting, drop buffered
// frames of the old source, restart the decoder, resume painting.
// NOTE(review): toLatin1() will mangle non-Latin paths (e.g. Chinese file
// names); toLocal8Bit()/UTF-8 is probably wanted -- confirm.
void FFMPEG_LEARN::slot_UpdatMediaSource()
{
	m_timer.stop();
	QString newSource = m_SourceChooseDialog->GetFilePath();
	QByteArray ba = newSource.toLatin1();
	QImage image;
	OperateImageQueue(3, image);// clear frames belonging to the previous source
	m_decodecThread->UpdateFormatContext(ba.data());
	m_timer.start();
	m_SourceChooseDialog->setVisible(false);
}

#ifndef VIDEODOWNLOADDIALOG_H
#define VIDEODOWNLOADDIALOG_H
#include "ui_videodownloaddialog.h"
#include 

class QDateTime;
// Source-selection dialog (originally written for a download tool, reused
// here to let the user type or browse for a media path/URL).
class videodownloaddialog : public QDialog
{
	Q_OBJECT

public:
	videodownloaddialog(QWidget *parent = 0);
	~videodownloaddialog();
	void SetBeginTime(const QDateTime& datatime);// write into the begin-time edit
	const QDateTime& GetBeginTime();// read the begin-time edit into m_BeginTime
	void SetEndTime(const QDateTime& datatime);// write into the end-time edit
	const QDateTime& GetEndTime();// read the end-time edit into m_EndTime
	void SetFilePath(const QString&  path);// write into the path line edit
	const QString& GetFilePath();// read the path line edit into m_currentFilePath
	public slots:
		void slot_FileDialog();// "browse" button handler
signals:
		void sign_DownLoad();// re-emitted OK click; the owner reads GetFilePath()
private:
	QString m_currentFilePath;// last path read from the line edit
	QDateTime m_BeginTime;
	QDateTime m_EndTime;
	//QFileDialog m_fileDialog;
//protected:
//	void paintEvent(QPaintEvent *);
private:
	Ui::videodownloaddialog ui;
};

#endif // VIDEODOWNLOADDIALOG_H




#include "videodownloaddialog.h"
#include
#include 
#include 
// Dialog setup: initialize both time edits to "now" and wire the buttons.
videodownloaddialog::videodownloaddialog(QWidget *parent)
	: QDialog(parent)
{
	ui.setupUi(this);
	m_BeginTime=QDateTime::currentDateTime();
	m_EndTime=m_BeginTime;
	SetBeginTime(m_BeginTime);
	SetEndTime(m_EndTime);
	// OK re-emits as sign_DownLoad; Cancel hides; "scan" opens a file dialog
	connect(ui.tbOk,SIGNAL(clicked()),this,SIGNAL(sign_DownLoad()));
	connect(ui.tbCancel,SIGNAL(clicked()),this,SLOT(hide()));
	connect(ui.btnscan,SIGNAL(clicked()),this,SLOT(slot_FileDialog()));
	this->setWindowTitle(QString::fromLocal8Bit("选择视频来源"));

}

videodownloaddialog::~videodownloaddialog()
{

}
// Write `datatime` into the begin-time edit.
void videodownloaddialog::SetBeginTime( const QDateTime& datatime )
{
	ui.BeginTimeEdit->setDateTime(datatime);
}

// Read the begin-time edit into m_BeginTime and return a reference to it.
const QDateTime& videodownloaddialog::GetBeginTime()
{
	m_BeginTime=ui.BeginTimeEdit->dateTime();
	return m_BeginTime;
}

// Write `datatime` into the end-time edit.
void videodownloaddialog::SetEndTime( const QDateTime& datatime )
{
	ui.EndTimeEdit->setDateTime(datatime);
}

// Read the end-time edit into m_EndTime and return a reference to it.
const QDateTime& videodownloaddialog::GetEndTime()
{
	m_EndTime=ui.EndTimeEdit->dateTime();
	return m_EndTime;
}

// Write `path` into the path line edit.
void videodownloaddialog::SetFilePath( const QString& path )
{
	ui.pathLineEdit->setText(path);	
}


// Read the path line edit into m_currentFilePath and return a reference to it.
const QString& videodownloaddialog::GetFilePath()
{
	m_currentFilePath= ui.pathLineEdit->text();
	return m_currentFilePath;
}


// "Browse" handler: seed the dialog with the current path (or this app's
// own path when empty) and write the chosen file back into the line edit.
// NOTE(review): getSaveFileName is used although this picks an existing
// source to play -- getOpenFileName is probably intended; confirm.
void videodownloaddialog::slot_FileDialog()
{
	m_currentFilePath=GetFilePath();
	if(m_currentFilePath.isEmpty())
	{
		m_currentFilePath=QApplication::applicationFilePath();
	}

	QString filepath=QFileDialog::getSaveFileName(
		this,// parent widget
		tr("选择路径"),  
		m_currentFilePath, // initial path
		//tr("Images (*.png *.bmp *.jpg *.tif *.GIF)")); // example filter
		0,// filter
		0// options
		);
	if(!filepath.isEmpty())
	{
		SetFilePath(filepath);
	}

}

这个是一个输入输出路径,是我以前写其他程序的时候写的,这里方便直接用了,就是用于弹出框让我输入路径啥的。

 

不得不承认,这个积木搭的确实不像样,哈哈,但肯定是能用的,这里差一个cpp,等找到了加上供参考。。不过内容也简单,就是一个窗口,中央一个播放视频的画布,将得到的RGB格式直接挨个显示出来或者存到队列里面去。当然,由于RGB是非常占内存的,所以,请控制数量或者其他办法规避内存持续上涨。因为我发现播放消耗的速度33ms/frame是远远慢于解码速度的。

纯当练手,毕竟书到用时方恨少,说不定哪天我就用到ffmpeg和sdl了。不过这里音频还是有点问题的,比如前一两分钟总是杂音,这个嘛。。。等我吃完火锅再回来看是怎么个回事咯、拜了个拜。

火锅吃了,发现就是解码速度过快导致咯。。嘿嘿。

去官网把对应的库下载下来,然后整合编译成自己的来玩玩吧,至少我看cctv5是没问题的,比较流畅。可以是本地也可以是网络串流,这里网上找了些测试的,在网络条件好的情况下基本无误。好了。。。粗糙的播放器是完成了,网络一般是实时的,不存在同步问题,至于本地文件,同步嘛,这里就不硬塞了,下次重构一个得嘞。

以下是我网上找的,用这套代码编译的均可使用,体育爱好者可以用这个看,输入地址即可,至少没有广告啦。

当然,什么暂停,快放啦,切换视频源啦,通通不是重点,这个会了,那些还不是分分钟实现么。

1,RTMP协议直播源

香港卫视:rtmp://live.hkstv.hk.lxdns.com/live/hks

 

2,RTSP协议直播源

大熊兔(点播):rtsp://184.72.239.149/vod/mp4://BigBuckBunny_175k.mov

 

3,HTTP协议直播源

香港卫视:http://live.hkstv.hk.lxdns.com/live/hks/playlist.m3u8

CCTV1高清:http://ivi.bupt.edu.cn/hls/cctv1hd.m3u8

CCTV3高清:http://ivi.bupt.edu.cn/hls/cctv3hd.m3u8

CCTV5高清:http://ivi.bupt.edu.cn/hls/cctv5hd.m3u8

CCTV5+高清:http://ivi.bupt.edu.cn/hls/cctv5phd.m3u8

CCTV6高清:http://ivi.bupt.edu.cn/hls/cctv6hd.m3u8

你可能感兴趣的:(Qt)