Once the installation is complete, you can write your first program: create a Qt project and add the FFmpeg headers and libraries to it.
1. Add the libraries you need to the .pro file. The following are the ones I use; adjust the list to your own needs:
INCLUDEPATH += D:\global_dev\ffmpeg\include
LIBS += -LD:\global_dev\ffmpeg\lib -lavcodec
LIBS += -LD:\global_dev\ffmpeg\lib -lavdevice
LIBS += -LD:\global_dev\ffmpeg\lib -lavfilter
LIBS += -LD:\global_dev\ffmpeg\lib -lavformat
LIBS += -LD:\global_dev\ffmpeg\lib -lavutil
LIBS += -LD:\global_dev\ffmpeg\lib -lswscale
LIBS += -LD:\global_dev\ffmpeg\lib -lswresample
2. Include the corresponding header files:
extern "C" {
#include "libavformat/avformat.h"
#include "libavutil/time.h"
#include "libavutil/opt.h"
#include "libavcodec/avcodec.h"
#include "libavdevice/avdevice.h"
#include "libavfilter/avfilter.h"
#include "libswscale/swscale.h"
#include "libswresample/swresample.h"
}
These are the header files I use. Because FFmpeg is written in C, the includes must be wrapped in extern "C" when used from C++.
3. Audio/video capture code
AVInputStream.h
#ifndef AVINPUTSTREAM_H
#define AVINPUTSTREAM_H
#include "av.h"
#include <string>
#include <mutex>
using namespace std;
typedef int (* VideoCaptureCB)(AVStream * input_st, enum AVPixelFormat pix_fmt, AVFrame *pframe, __int64 lTimeStamp);
typedef int (* AudioCaptureCB)(AVStream * input_st, AVFrame *pframe, __int64 lTimeStamp);
class CAVInputStream
{
public:
CAVInputStream(void);
~CAVInputStream(void);
public:
void SetVideoCaptureDevice(string device_name);
void SetAudioCaptureDevice(string device_name);
bool OpenInputStream();
void CloseInputStream();
bool StartCapture();
void SetVideoCaptureCB(VideoCaptureCB pFuncCB);
void SetAudioCaptureCB(AudioCaptureCB pFuncCB);
bool GetVideoInputInfo(int & width, int & height, int & framerate, AVPixelFormat & pixFmt);
bool GetAudioInputInfo(AVSampleFormat & sample_fmt, int & sample_rate, int & channels);
protected:
int ReadVideoPackets();
int ReadAudioPackets();
protected:
string m_video_device;
string m_audio_device;
int m_videoindex;
int m_audioindex;
AVFormatContext *m_pVidFmtCtx;
AVFormatContext *m_pAudFmtCtx;
AVInputFormat *m_pInputFormat;
AVPacket *dec_pkt;
bool m_exit_thread; //flag telling the capture threads to exit
VideoCaptureCB m_pVideoCBFunc; //video data callback
AudioCaptureCB m_pAudioCBFunc; //audio data callback
mutex m_mutex;
int64_t m_start_time; //capture start time
};
#endif
AVInputStream.cpp
#include "AVInputStream.h"
#include <windows.h>
#include <thread>
#include <cassert>
#include <QDebug>
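//Convert an ANSI (local code-page) string to UTF-8, so that device names containing non-ASCII characters reach FFmpeg intact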
static std::string AnsiToUTF8(const char *_ansi, int _ansi_len)
{
std::string str_utf8("");
wchar_t* pUnicode = NULL;
BYTE * pUtfData = NULL;
do
{
int unicodeNeed = MultiByteToWideChar(CP_ACP, 0, _ansi, _ansi_len, NULL, 0);
pUnicode = new wchar_t[unicodeNeed + 1];
memset(pUnicode, 0, (unicodeNeed + 1)*sizeof(wchar_t));
int unicodeDone = MultiByteToWideChar(CP_ACP, 0, _ansi, _ansi_len, (LPWSTR)pUnicode, unicodeNeed);
if (unicodeDone != unicodeNeed)
{
break;
}
int utfNeed = WideCharToMultiByte(CP_UTF8, 0, (LPWSTR)pUnicode, unicodeDone, (char *)pUtfData, 0, NULL, NULL);
pUtfData = new BYTE[utfNeed + 1];
memset(pUtfData, 0, utfNeed + 1);
int utfDone = WideCharToMultiByte(CP_UTF8, 0, (LPWSTR)pUnicode, unicodeDone, (char *)pUtfData, utfNeed, NULL, NULL);
if (utfNeed != utfDone)
{
break;
}
str_utf8.assign((char *)pUtfData);
} while (false);
if (pUnicode)
{
delete[] pUnicode;
}
if (pUtfData)
{
delete[] pUtfData;
}
return str_utf8;
}
CAVInputStream::CAVInputStream(void)
{
m_exit_thread = false;
m_pVidFmtCtx = NULL;
m_pAudFmtCtx = NULL;
m_pInputFormat = NULL;
dec_pkt = NULL;
m_pVideoCBFunc = NULL;
m_pAudioCBFunc = NULL;
m_videoindex = -1;
m_audioindex = -1;
m_start_time = 0;
}
CAVInputStream::~CAVInputStream(void)
{
CloseInputStream();
}
void CAVInputStream::SetVideoCaptureCB(VideoCaptureCB pFuncCB)
{
m_pVideoCBFunc = pFuncCB;
}
void CAVInputStream::SetAudioCaptureCB(AudioCaptureCB pFuncCB)
{
m_pAudioCBFunc = pFuncCB;
}
void CAVInputStream::SetVideoCaptureDevice(string device_name)
{
m_video_device = device_name;
}
void CAVInputStream::SetAudioCaptureDevice(string device_name)
{
m_audio_device = device_name;
}
bool CAVInputStream::OpenInputStream()
{
if(m_video_device.empty() && m_audio_device.empty())
{
qDebug("you have not set any capture device \n");
return false;
}
int i;
//avdevice_register_all() must be called before opening a DirectShow device, otherwise av_find_input_format() below fails
m_pInputFormat = av_find_input_format("dshow");
assert(m_pInputFormat != NULL);
// Set device params
AVDictionary *device_param = 0;
//if rtbufsize is not set, error messages appear in the console, but you can usually still watch or record the stream correctly
//setting rtbufsize suppresses those messages, but a larger rtbufsize introduces latency
//av_dict_set(&device_param, "rtbufsize", "10M", 0);
if(!m_video_device.empty())
{
int res = 0;
string device_name = "video=" + m_video_device;
string device_name_utf8 = AnsiToUTF8(device_name.c_str(), device_name.length()); //convert to UTF-8 so that device names containing Chinese characters are not garbled
//Set own video device's name
if ((res = avformat_open_input(&m_pVidFmtCtx, device_name_utf8.c_str(), m_pInputFormat, &device_param)) != 0)
{
qDebug("Couldn't open input video stream.(无法打开输入流)\n");
return false;
}
//input video initialize
if (avformat_find_stream_info(m_pVidFmtCtx, NULL) < 0)
{
qDebug("Couldn't find video stream information.(无法获取流信息)\n");
return false;
}
m_videoindex = -1;
for (i = 0; i < m_pVidFmtCtx->nb_streams; i++)
{
if (m_pVidFmtCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
{
m_videoindex = i;
break;
}
}
if (m_videoindex == -1)
{
qDebug("Couldn't find a video stream.(没有找到视频流)\n");
return false;
}
if (avcodec_open2(m_pVidFmtCtx->streams[m_videoindex]->codec, avcodec_find_decoder(m_pVidFmtCtx->streams[m_videoindex]->codec->codec_id), NULL) < 0)
{
qDebug("Could not open video codec.(无法打开解码器)\n");
return false;
}
}
//
if(!m_audio_device.empty())
{
string device_name = "audio=" + m_audio_device;
string device_name_utf8 = AnsiToUTF8(device_name.c_str(), device_name.length()); //convert to UTF-8 so that device names containing Chinese characters are not garbled
//Set own audio device's name
if (avformat_open_input(&m_pAudFmtCtx, device_name_utf8.c_str(), m_pInputFormat, &device_param) != 0){
qDebug("Couldn't open input audio stream.(无法打开输入流)\n");
return false;
}
//input audio initialize
if (avformat_find_stream_info(m_pAudFmtCtx, NULL) < 0)
{
qDebug("Couldn't find audio stream information.(无法获取流信息)\n");
return false;
}
m_audioindex = -1;
for (i = 0; i < m_pAudFmtCtx->nb_streams; i++)
{
if (m_pAudFmtCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
{
m_audioindex = i;
break;
}
}
if (m_audioindex == -1)
{
qDebug("Couldn't find a audio stream.(没有找到音频流)\n");
return false;
}
if (avcodec_open2(m_pAudFmtCtx->streams[m_audioindex]->codec, avcodec_find_decoder(m_pAudFmtCtx->streams[m_audioindex]->codec->codec_id), NULL) < 0)
{
qDebug("Could not open audio codec.(无法打开解码器)\n");
return false;
}
}
return true;
}
bool CAVInputStream::StartCapture()
{
if (m_videoindex == -1 && m_audioindex == -1)
{
qDebug("错误:你没有打开设备 \n");
return false;
}
m_start_time = av_gettime();
m_exit_thread = false;
if(!m_video_device.empty()) {
std::thread readVideo(&CAVInputStream::ReadVideoPackets,this);
readVideo.detach();
}
if(!m_audio_device.empty()) {
std::thread readAudio(&CAVInputStream::ReadAudioPackets,this);
readAudio.detach();
}
return true;
}
void CAVInputStream::CloseInputStream()
{
m_exit_thread = true;
// TODO: join the detached capture threads here
//close the input streams; avformat_close_input() also frees the context, so no separate avformat_free_context() call is needed
if (m_pVidFmtCtx != NULL)
{
avformat_close_input(&m_pVidFmtCtx);
//m_pVidFmtCtx = NULL;
}
if (m_pAudFmtCtx != NULL)
{
avformat_close_input(&m_pAudFmtCtx);
//m_pAudFmtCtx = NULL;
}
m_pVidFmtCtx = NULL;
m_pAudFmtCtx = NULL;
m_pInputFormat = NULL;
m_videoindex = -1;
m_audioindex = -1;
}
int CAVInputStream::ReadVideoPackets()
{
if(dec_pkt == NULL)
{
//allocate and initialize the packet used to read from the capture device
dec_pkt = av_packet_alloc();
}
int encode_video = 1;
int ret;
//TODO: start decode and encode
while (encode_video)
{
if (m_exit_thread)
break;
AVFrame * pframe = NULL;
if ((ret = av_read_frame(m_pVidFmtCtx, dec_pkt)) >= 0)
{
pframe = av_frame_alloc();
if (!pframe)
{
ret = AVERROR(ENOMEM);
return ret;
}
int dec_got_frame = 0;
ret = avcodec_decode_video2(m_pVidFmtCtx->streams[dec_pkt->stream_index]->codec, pframe, &dec_got_frame, dec_pkt);
if (ret < 0)
{
av_frame_free(&pframe);
av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
break;
}
if (dec_got_frame)
{
if(m_pVideoCBFunc)
{
std::lock_guard<std::mutex> mu(m_mutex);
m_pVideoCBFunc(m_pVidFmtCtx->streams[dec_pkt->stream_index], m_pVidFmtCtx->streams[m_videoindex]->codec->pix_fmt, pframe, av_gettime() - m_start_time);
}
av_frame_free(&pframe);
}
else
{
av_frame_free(&pframe);
}
av_packet_unref(dec_pkt);
}
else
{
if (ret == AVERROR_EOF)
encode_video = 0;
else
{
qDebug("Could not read video frame\n");
break;
}
}
}
return 0;
}
int CAVInputStream::ReadAudioPackets()
{
//TODO: audio trancoding here
int ret;
int encode_audio = 1;
int dec_got_frame_a = 0;
//TODO: start decode and encode
while (encode_audio)
{
if (m_exit_thread)
break;
/**
* Decode one frame worth of audio samples, convert it to the
* output sample format and put it into the FIFO buffer.
*/
AVFrame *input_frame = av_frame_alloc();
if (!input_frame)
{
ret = AVERROR(ENOMEM);
return ret;
}
/** Decode one frame worth of audio samples. */
/** Packet used for temporary storage. */
AVPacket input_packet;
av_init_packet(&input_packet);
input_packet.data = NULL;
input_packet.size = 0;
/** Read one audio frame from the input file into a temporary packet. */
if ((ret = av_read_frame(m_pAudFmtCtx, &input_packet)) < 0)
{
/** If we are at the end of the file, flush the decoder below. */
if (ret == AVERROR_EOF)
{
encode_audio = 0;
}
else
{
qDebug("Could not read audio frame\n");
av_frame_free(&input_frame);
return ret;
}
}
/**
* Decode the audio frame stored in the temporary packet.
* The input audio stream decoder is used to do this.
* If we are at the end of the file, pass an empty packet to the decoder
* to flush it.
*/
if ((ret = avcodec_decode_audio4(m_pAudFmtCtx->streams[m_audioindex]->codec, input_frame, &dec_got_frame_a, &input_packet)) < 0)
{
qDebug("Could not decode audio frame\n");
av_packet_unref(&input_packet);
av_frame_free(&input_frame);
return ret;
}
av_packet_unref(&input_packet);
/** If there is decoded data, convert and store it */
if (dec_got_frame_a)
{
if(m_pAudioCBFunc)
{
std::lock_guard<std::mutex> mu(m_mutex);
m_pAudioCBFunc(m_pAudFmtCtx->streams[m_audioindex], input_frame, av_gettime() - m_start_time);
}
}
av_frame_free(&input_frame);
}//while
return 0;
}
bool CAVInputStream::GetVideoInputInfo(int & width, int & height, int & frame_rate, AVPixelFormat & pixFmt)
{
if(m_videoindex != -1)
{
width = m_pVidFmtCtx->streams[m_videoindex]->codec->width;
height = m_pVidFmtCtx->streams[m_videoindex]->codec->height;
AVStream *stream = m_pVidFmtCtx->streams[m_videoindex];
pixFmt = stream->codec->pix_fmt;
//frame_rate = stream->avg_frame_rate.num/stream->avg_frame_rate.den;//每秒多少帧
if(stream->r_frame_rate.den > 0)
{
frame_rate = stream->r_frame_rate.num/stream->r_frame_rate.den;
}
else if(stream->codec->framerate.den > 0)
{
frame_rate = stream->codec->framerate.num/stream->codec->framerate.den;
}
return true;
}
return false;
}
bool CAVInputStream::GetAudioInputInfo(AVSampleFormat & sample_fmt, int & sample_rate, int & channels)
{
if(m_audioindex != -1)
{
sample_fmt = m_pAudFmtCtx->streams[m_audioindex]->codec->sample_fmt;
sample_rate = m_pAudFmtCtx->streams[m_audioindex]->codec->sample_rate;
channels = m_pAudFmtCtx->streams[m_audioindex]->codec->channels;
return true;
}
return false;
}
This class starts two capture threads, one reading video and one reading audio; the captured frames are handed back to the caller through the VideoCaptureCB and AudioCaptureCB callbacks. A minimal wiring sketch is shown below.
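The following is a rough sketch of how the class might be driven. The device names "USB Camera" and "Microphone" are placeholders for the DirectShow names on your machine, and VideoCaptureCallback / AudioCaptureCallback are the callbacks implemented in the next step:
avdevice_register_all(); //must be called once before opening DirectShow devices
CAVInputStream inputStream;
inputStream.SetVideoCaptureDevice("USB Camera"); //placeholder device name
inputStream.SetAudioCaptureDevice("Microphone"); //placeholder device name
inputStream.SetVideoCaptureCB(VideoCaptureCallback);
inputStream.SetAudioCaptureCB(AudioCaptureCallback);
if (inputStream.OpenInputStream() && inputStream.StartCapture())
{
//the detached capture threads are now running; the callbacks fire on those threads
}
//... later, when capture should stop:
inputStream.CloseInputStream(); //sets the exit flag and releases the devices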
4. Receiving and displaying the captured data
Implement the two callback functions in the main window:
int VideoCaptureCallback(AVStream * input_st, enum AVPixelFormat pix_fmt, AVFrame *pframe, __int64 lTimeStamp)
{
thisWindow->PlayVideo(input_st, pframe); // 本地播放
thisWindow->m_OutputStream.write_video_frame(input_st, pix_fmt, pframe, lTimeStamp); // 写入文件和发送到网络
return 0;
}
int AudioCaptureCallback(AVStream * input_st, AVFrame *pframe, __int64 lTimeStamp)
{
thisWindow->PlayAudio(input_st, pframe); // 本地播放
thisWindow->m_OutputStream.write_audio_frame(input_st, pix_fmt, pframe, lTimeStamp); // 写入文件和发送到网络
return 0;
}
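If you are not sure what names to pass to SetVideoCaptureDevice / SetAudioCaptureDevice, FFmpeg's dshow input can print the available devices to its log. This is a sketch of the same mechanism as the command line "ffmpeg -list_devices true -f dshow -i dummy"; the open call is expected to fail after printing the list:
AVDictionary *options = NULL;
av_dict_set(&options, "list_devices", "true", 0);
AVFormatContext *ctx = NULL;
//"video=dummy" is only a placeholder URL; the device names appear in the FFmpeg log output
avformat_open_input(&ctx, "video=dummy", av_find_input_format("dshow"), &options);
av_dict_free(&options);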
5. Playing the video. Here I convert each frame to RGB24 before displaying it:
if(m_pRgb24 == NULL || m_nRgbSize == 0)
{
int nRgbSize = st->codec->width * st->codec->height*4; //a 24-bit bitmap theoretically needs width*height*3 bytes, but rows are padded to 4-byte boundaries, so deliberately allocate a little extra
m_pRgb24 = new BYTE[nRgbSize];
memset(m_pRgb24, 0, nRgbSize);
m_nRgbSize = nRgbSize;
}
if(st->codec->pix_fmt != AV_PIX_FMT_BGR24)
{
if(img_convert_ctx == NULL)
{
img_convert_ctx = sws_getContext(st->codec->width, st->codec->height,
st->codec->pix_fmt,
st->codec->width, st->codec->height,
AV_PIX_FMT_RGB24,
SWS_BICUBIC, NULL, NULL, NULL);
if (img_convert_ctx == NULL)
{
qDebug("sws_getContext() failed \n");
return false;
}
}
uint8_t *rgb_src[3]= {m_pRgb24, NULL, NULL};
int rgb_stride[3] = {st->codec->width*3, 0, 0};
//转成RGB格式
sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, st->codec->height, rgb_src, rgb_stride);
}
else
{
//the frame is already RGB24: copy it row by row, since linesize may include padding
for (int y = 0; y < st->codec->height; y++)
memcpy(m_pRgb24 + y * st->codec->width * 3, pFrame->data[0] + y * pFrame->linesize[0], st->codec->width * 3);
}
// 显示到界面
QLabel* labYours = ui->labYours;
QImage Image = QImage((uchar *)m_pRgb24, st->codec->width,st->codec->height, QImage::Format_RGB888);
QPixmap pixmap = QPixmap::fromImage(Image);
int w = labYours->width();
int h = labYours->height();
QPixmap fitpixmapY = pixmap.scaled(w, h, Qt::IgnoreAspectRatio, Qt::SmoothTransformation); // stretch to fill the label
labYours->setScaledContents(true);
labYours->setPixmap(fitpixmapY);