main.cpp
#include <QApplication>
#include <QDebug>
#include <QThread>
#include "Controller.h"
using namespace std;
int main(int argc, char *argv[])
{
QApplication a(argc, argv);
qDebug() << "main thread:" << QThread::currentThreadId();
Controller *controller = new Controller();
emit controller->operate("copy");
return a.exec();
}
Rtmp.h
#pragma once
#include "XMediaEncode.h"
#include <opencv2/core.hpp>
#include <opencv2/videoio.hpp>
#include <string>
class Rtmp
{
public:
int sampleRate = 44100;
int channels = 2;
int sampleByte = 2;
int nbSample = 1024;
int inWidth = 0;
int inHeight = 0;
int inPixSize = 3;
int fps = 0;
int vindex = 0;
int aindex = 0;
std::string inUrl = "rtsp://test:[email protected]";
//RTMP push URL of the nginx-rtmp live server
std::string outUrl = "rtmp://0.0.0.0/live";
cv::VideoCapture cam;
cv::Mat frame;
//factory method
static Rtmp *Get(unsigned char index = 0);
virtual bool rtmp_init() = 0;
virtual AVFrame* Resample(char *data) = 0;
virtual AVPacket *EncodeAudio(AVFrame *frame) = 0;
virtual bool SendFrame(AVPacket *pkt,long long pts, int streamIndex) = 0;
virtual AVFrame* RGBToYUV(char *rgb,int inPixSize) = 0;
virtual AVPacket *EncodeVideo(AVFrame* frame) = 0;
virtual ~Rtmp();
protected:
Rtmp();
};
Rtmp.cpp
#include "Rtmp.h"
#include <iostream>
#include <stdexcept>
using namespace std;
using namespace cv;
class CRtmp : public Rtmp
{
bool rtmp_init()
{
//encoder and pixel-format conversion
mediaEncode = XMediaEncode::Get(0);
//initialize XMediaEncode
mediaEncode->register_ffmpeg();
mediaEncode->channels = channels;
mediaEncode->nbSample = 1024;
mediaEncode->sampleRate = sampleRate;
mediaEncode->inSampleFmt = XSampleFMT::X_S16;
mediaEncode->outSampleFmt = XSampleFMT::X_FLATP;
//cam.open(inUrl);
//1. open the rtsp camera with OpenCV (here: local device 0)
cam.open(0);
if (!cam.isOpened())
{
throw logic_error("cam open failed");
}
cout << inUrl << " cam open success" << endl;
inWidth = cam.get(CAP_PROP_FRAME_WIDTH);
inHeight = cam.get(CAP_PROP_FRAME_HEIGHT);
fps = cam.get(CAP_PROP_FPS);
//2. initialize the pixel-format conversion context
//3. output data structure
mediaEncode->inWidth = inWidth;
mediaEncode->inHeight = inHeight;
mediaEncode->outWidth = inWidth;
mediaEncode->outHeight = inHeight;
mediaEncode->InitScale();
if (!mediaEncode->InitVideoCodec())
{
std::cout << "InitVideoCodec failed!" << std::flush;
return false;
}
if (!mediaEncode->InitResample())
{
std::cout << "InitResample failed" << flush;
return false;
}
if (!mediaEncode->InitAudioCode())
{
std::cout << "InitAudioCode failed" << flush;
return false;
}
//a. create the output muxer context
if (!mediaEncode->Init(outUrl.c_str()))
{
std::cout << "avformat_alloc_output_context2" << flush;
return false;
}
//add the video or audio stream
vindex = mediaEncode->AddStream(mediaEncode->vc);
std::cout<<"vindex " <<vindex<<endl;
if (vindex<0)
{
std::cout << "mediaEncode->AddStream failed" << std::endl;
return false;
}
//b. add the audio stream
aindex = mediaEncode->AddStream(mediaEncode->ac);
std::cout<<"aindex " <<aindex<<endl;
if (aindex<0)
{
std::cout << "AddStream failed" << flush;
return false;
}
//open the RTMP network output IO
//write the muxer header
if (!mediaEncode->SendHead())
{
std::cout << "SendHead failed" << std::endl;
return false;
}
return true;
}
AVFrame *Resample(char *data)
{
return mediaEncode->Resample(data);
}
AVPacket *EncodeAudio(AVFrame *frame)
{
return mediaEncode->EncodeAudio(frame);
}
bool SendFrame(AVPacket *pkt, long long pts, int streamIndex)
{
return mediaEncode->SendFrame(pkt, pts, streamIndex);
}
AVFrame *RGBToYUV(char *rgb, int inPixSize)
{
return mediaEncode->RGBToYUV(rgb, inPixSize);
}
AVPacket *EncodeVideo(AVFrame *frame)
{
return mediaEncode->EncodeVideo(frame);
}
private:
XMediaEncode *mediaEncode = nullptr;
};
Rtmp::Rtmp()
{
}
Rtmp::~Rtmp()
{
if (cam.isOpened())
cam.release();
}
Rtmp *Rtmp::Get(unsigned char index)
{
static CRtmp crtmp[255];
return &crtmp[index];
}
Controller.h
#pragma once
#include <QObject>
#include <QThread>
#include <QString>
class Controller : public QObject
{
Q_OBJECT
public:
Controller(QObject *parent = nullptr);
~Controller();
public slots:
void handleResults(const QString &des);
signals:
void operate(const QString &cmd);
private:
QThread video_thread;
QThread audio_thread;
};
Controller.cpp
#include "Controller.h"
#include "AudioRecordWorker.h"
#include "VideoRecordWorker.h"
#include <QDebug>
#include <QThread>
#include "Rtmp.h"
Controller::Controller(QObject *parent)
: QObject(parent)
{
Rtmp *rtmp = Rtmp::Get(0);
rtmp->rtmp_init();
#if 1
//audio push streaming
AudioRecordWorker *audioRecord = new AudioRecordWorker();
audioRecord->moveToThread(&audio_thread);
connect(&audio_thread, &QThread::finished, audioRecord, &QObject::deleteLater);
connect(this, &Controller::operate, audioRecord, &AudioRecordWorker::doSomething);
connect(audioRecord, &AudioRecordWorker::resultNotify, this, &Controller::handleResults);
//#else
//video push streaming
VideoRecordWorker *videoRecord = new VideoRecordWorker();
videoRecord->moveToThread(&video_thread);
connect(videoRecord, &VideoRecordWorker::resultNotify, this, &Controller::handleResults);
connect(&video_thread, &QThread::finished, videoRecord, &QObject::deleteLater);
connect(this, &Controller::operate, videoRecord, &VideoRecordWorker::doSomething);
#endif
audio_thread.start();
video_thread.start();
}
Controller::~Controller()
{
video_thread.quit();
video_thread.wait();
audio_thread.quit();
audio_thread.wait();
}
void Controller::handleResults(const QString &des)
{
qDebug() << "handleResults()" << des << "thread:" << QThread::currentThreadId();
}
VideoRecordWorker.h
#pragma once
#include <QObject>
#include <QString>
#include <QThread>
#include <QDebug>
#include <string>
#include "Rtmp.h"
class XMediaEncode;
class VideoRecordWorker : public QObject
{
Q_OBJECT
public:
explicit VideoRecordWorker(QObject *parent = nullptr);
~VideoRecordWorker();
void mediaEncodeInit();
void startRecord();
void recordParameter(int sampleRate, int channels, int sampleByte , int nbSample);
public slots:
void doSomething(const QString &cmd);
signals:
void resultNotify(const QString &des);
private:
std::string inUrl = "rtsp://test:[email protected]";
//RTMP push URL of the nginx-rtmp live server
std::string outUrl = "rtmp://0.0.0.0/live";
Rtmp *rtmp = nullptr;
int sampleRate = 44100;
int channels = 2;
int sampleByte = 2;
int nbSample = 1024;
int inWidth = 0;
int inHeight = 0;
int fps = 0;
};
VideoRecordWorker.cpp
// 1. changed include headers for the Qt worker version
#include <QDebug>
#include <QThread>
#include <QString>
#include <string>
#include "VideoRecordWorker.h"
#include "XMediaEncode.h"
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
}
extern "C"
{
#include <libavutil/time.h>
}
using namespace cv;
using namespace std;
VideoRecordWorker::VideoRecordWorker(QObject *parent)
: QObject(parent)
{
}
VideoRecordWorker::~VideoRecordWorker()
{
qDebug() << "~VideoRecordWorker()" << "thread:" << QThread::currentThreadId();
}
void VideoRecordWorker::mediaEncodeInit()
{
}
void VideoRecordWorker::doSomething(const QString &cmd)
{
qDebug() << "doSomething()" << cmd << "thread:" << QThread::currentThreadId();
startRecord();
emit resultNotify("doSomething ok!");
}
void VideoRecordWorker::startRecord()
{
rtmp = Rtmp::Get(0);
// rtmp->rtmp_init();
long long beginTime = av_gettime();
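//note: av_gettime() returns the wall-clock time in microseconds, so the pts computed
//below as (av_gettime() - beginTime) is the elapsed time in microseconds and matches
//the video encoder time base {1, 1000000} set in InitVideoCodec().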
for (;;)
{
//read a video frame from the camera; OpenCV handles the decoding
if (!rtmp->cam.grab())
{
continue;
}
//yuv to rgb
//input data structure
if (!rtmp->cam.retrieve(rtmp->frame))
{
continue;
}
//rgb to yuv
int inPixSize = rtmp->frame.elemSize();
AVFrame *yuv = rtmp->RGBToYUV((char *)rtmp->frame.data, inPixSize);
if (!yuv) continue;
long long pts = av_gettime() - beginTime;
yuv->pts = pts;
AVPacket *pkt = rtmp->EncodeVideo(yuv);
if (!pkt) continue;
rtmp->SendFrame(pkt, pts, rtmp->vindex);
}
}
void VideoRecordWorker::recordParameter(int sampleRate, int channels, int sampleByte , int nbSample)
{
this->sampleRate = sampleRate;
this->channels = channels;
this->sampleByte = sampleByte;
this->nbSample = nbSample;
}
XMediaEncode.h
#pragma once
#include <string>
#include <QMutex>
struct AVFrame;
struct AVPacket;
struct AVCodecContext;
struct AVFormatContext;
struct AVStream;
enum XSampleFMT
{
X_S16 = 1,
X_FLATP = 8
};
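//note: the numeric values match FFmpeg's AVSampleFormat (AV_SAMPLE_FMT_S16 = 1,
//AV_SAMPLE_FMT_FLTP = 8), which is what makes the (AVSampleFormat) casts in
//InitResample() valid.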
//audio/video encoding interface class
class XMediaEncode
{
public:
//input parameters
int inWidth = 1280;
int inHeight = 720;
int channels = 2;
int sampleRate = 44100;
XSampleFMT inSampleFmt = X_S16;
//output parameters
int outWidth = 1280;
int outHeight = 720;
int bitrate = 4000000; //bit rate of the compressed video, in bits per second
int fps = 25;
int nbSample = 1024;
XSampleFMT outSampleFmt = X_FLATP;
//factory method
static XMediaEncode *Get(unsigned char index = 0);
virtual void register_ffmpeg() = 0;
//initialize the muxer context
virtual bool Init(const char *url) = 0;
//initialize the pixel-format conversion context
virtual bool InitScale() = 0;
//initialize the audio resampling context
virtual bool InitResample() = 0;
//initialize the audio encoder
virtual bool InitAudioCode() = 0;
virtual AVFrame *Resample(char *data) = 0;
virtual AVFrame *RGBToYUV(char *rgb, int inPixSize) = 0;
//initialize the video encoder
virtual bool InitVideoCodec() = 0;
//video encoding
virtual AVPacket *EncodeVideo(AVFrame *frame) = 0;
//audio encoding
virtual AVPacket *EncodeAudio(AVFrame *frame) = 0;
//add a video or audio stream; returns the new stream index, or -1 on failure
virtual int AddStream(AVCodecContext *c) = 0;
//open the RTMP network IO and write the muxer header
virtual bool SendHead() = 0;
//push one frame over RTMP
//virtual bool SendFrame(AVPacket *pkt) = 0;
virtual bool SendFrame(AVPacket *pkt, long long pts, int streamIndex) = 0;
virtual ~XMediaEncode();
//video encoder context
AVCodecContext *vc = 0;
//audio encoder context
AVCodecContext *ac = 0;
AVFormatContext *ic = 0;
protected:
XMediaEncode();
//rtmp flv muxer
std::string outUrl = "";
AVStream *vs = 0;
AVStream *as = 0;
QMutex mutex;
};
XMediaEncode.cpp
#include "XMediaEncode.h"
#include <iostream>
#include <string>
#include <stdexcept>
using namespace std;
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
}
class CXMediaEncode: public XMediaEncode
{
public:
void close()
{
if (vsc)
{
sws_freeContext(vsc);
vsc = NULL;
}
if (asc)
{
swr_free(&asc);
}
if (yuv)
{
av_frame_free(&yuv);
}
if (vc)
{
avcodec_free_context(&vc);
}
if (pcm)
{
av_frame_free(&pcm);
}
if (ic)
{
avformat_close_input(&ic);
vs = NULL;
}
vc = NULL;
outUrl = "";
vpts = 0;
av_packet_unref(&vpacket);
apts = 0;
av_packet_unref(&apacket);
}
void register_ffmpeg()
{
//register all codecs
avcodec_register_all();
//register all muxers/demuxers
av_register_all();
//initialize the network protocols
avformat_network_init();
}
bool Init(const char *url)
{
///5 muxer and video stream configuration
//a. create the output muxer context
int ret = avformat_alloc_output_context2(&ic, 0, "flv", url);
this->outUrl = url;
if (ret != 0)
{
char buf[1024] = {0};
av_strerror(ret, buf, sizeof(buf) - 1);
cout << buf;
return false;
}
return true;
}
bool InitAudioCode()
{
codec = avcodec_find_encoder(AV_CODEC_ID_AAC);
if (!CreateAudioCodec(AV_CODEC_ID_AAC))
{
std::cout << "CreateAudioCodec failed" << std::endl;
return false;
}
std::cout << "InitAudioCode " << std::endl;
ac->bit_rate = 40000;
ac->sample_rate = sampleRate;
ac->sample_fmt = AV_SAMPLE_FMT_FLTP;
ac->channels = channels;
ac->channel_layout = av_get_default_channel_layout(channels);
return OpenCodec(ac);
}
bool InitVideoCodec()
{
//4 initialize the encoder context
//a find the encoder
codec = avcodec_find_encoder(AV_CODEC_ID_H264);
if (!CreateVideoCodec(AV_CODEC_ID_H264))
{
cout << "Can't find h264 encoder!" << endl;
return false;
}
cout << "InitVideoCodec " << endl;
vc->bit_rate = 50 * 1024 * 8; //compressed video bit rate: 50 KB per second, expressed in bits
vc->width = outWidth;
vc->height = outHeight;
//vc->time_base = {1, fps}; //time base
vc->time_base = { 1,1000000 };
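//a microsecond time base is used so that the av_gettime()-based pts values assigned
//by the capture workers can be fed to the encoder without any conversion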
vc->framerate = {fps, 1};
//GOP size: number of frames between keyframes
vc->gop_size = 50;
vc->max_b_frames = 0;
vc->pix_fmt = AV_PIX_FMT_YUV420P;
//d open the encoder
return OpenCodec(vc);
}
AVPacket *EncodeAudio(AVFrame *frame)
{
//pts calculation
//nb_samples / sample_rate = duration of one audio frame in seconds
//timebase pts = seconds * timebase.den
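//worked example with this project's defaults: nb_samples = 1024 at 44100 Hz gives
//1024/44100 ≈ 0.0232 s per frame; assuming the usual audio encoder time base of
//{1, sample_rate}, that is 1024 ticks per frame, which is what the commented
//av_rescale_q() line below would add to apts.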
//advance the audio pts by a fixed 1000 ticks per frame (a simplification; the
//commented av_rescale_q() line below shows the sample-accurate increment)
apts += 1000;
frame->pts = apts;
//apts += av_rescale_q(pcm->nb_samples, {1, sampleRate}, ac->time_base);
int ret = avcodec_send_frame(ac, frame);
if (ret != 0) return NULL;
av_packet_unref(&apacket);
ret = avcodec_receive_packet(ac, &apacket);
if (ret != 0) return NULL;
cout << apacket.size << " " << flush;
return &apacket;
}
AVPacket *EncodeVideo(AVFrame *frame)
{
av_packet_unref(&vpacket);
//h264 encoding
// frame->pts = vpts;
// vpts++;
int ret = avcodec_send_frame(vc, frame);
if (ret != 0)
return NULL;
//avcodec_send_frame() takes its own reference to the frame data on each call
ret = avcodec_receive_packet(vc, &vpacket);
if (ret != 0 || vpacket.size <= 0)
return NULL;
return &vpacket;
}
bool InitScale()
{
//2. initialize the format conversion context
vsc = sws_getCachedContext(vsc,
inWidth, inHeight, AV_PIX_FMT_BGR24, //source width, height, pixel format
outWidth, outHeight, AV_PIX_FMT_YUV420P, //output width, height, pixel format
SWS_BICUBIC,//scaling algorithm
0, 0, 0
);
if (!vsc)
{
cout << "sws_getCachedContext failed!";
return false;
}
//3. output data structure
yuv = av_frame_alloc();
yuv->format = AV_PIX_FMT_YUV420P;
yuv->width = outWidth;
yuv->height = outHeight;
yuv->pts = 0;
//allocate the yuv buffers
int ret = av_frame_get_buffer(yuv, 32);
if (ret != 0)
{
char buf[1024] = {0};
av_strerror(ret, buf, sizeof(buf) - 1);
throw logic_error(buf);
}
return true;
}
AVFrame *RGBToYUV(char *rgb, int inPixSize)
{
//rgb to yuv
//input data layout
uint8_t *indata[AV_NUM_DATA_POINTERS] = {0};
//bgrbgrbgr
//plane inData[0]bbbb gggg rrrr
indata[0] = (uint8_t *)rgb;
int insize[AV_NUM_DATA_POINTERS] = {0};
//bytes in one row (width) of input data
insize[0] = inWidth * inPixSize;
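//worked example: for a 640x480 BGR24 frame inPixSize is 3, so one row is
//640 * 3 = 1920 bytes; sws_scale() uses this stride to step through the packed input.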
int h = sws_scale(vsc, indata, insize, 0, inHeight, //input data
yuv->data, yuv->linesize);
if (h <= 0)
{
return NULL;
}
return yuv;
}
bool InitResample()
{
//initialize the audio resampling context
asc = swr_alloc_set_opts(asc,
av_get_default_channel_layout(channels), (AVSampleFormat)outSampleFmt, sampleRate, //output format
av_get_default_channel_layout(channels), (AVSampleFormat)inSampleFmt, sampleRate, //input format
0, 0);
if (!asc)
{
cout << "swr_alloc_set_opts failed!";
return false;
}
int ret = swr_init(asc);
if (ret != 0)
{
char err[1024] = {0};
av_strerror(ret, err, sizeof(err) - 1);
cout << err << endl;
return false;
}
std::cout << "音频重采样上下文初始化成功" << endl;
//音频从采样空间的分配
pcm = av_frame_alloc();
pcm->format = outSampleFmt;
pcm->channels = channels;
pcm->channel_layout = av_get_default_channel_layout(channels);
pcm->nb_samples = nbSample; //一帧音频一个通道的采样数
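//for AV_SAMPLE_FMT_FLTP (planar float) av_frame_get_buffer() allocates one plane per
//channel, each holding nb_samples * 4 bytes = 4096 bytes with the 1024-sample default.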
ret = av_frame_get_buffer(pcm, 0); //allocate the pcm buffers
if (ret != 0)
{
char err[1024] = {0};
av_strerror(ret, err, sizeof(err) - 1);
cout << err << endl;
return false;
}
return true;
}
AVFrame *Resample(char *data)
{
const uint8_t *indata[AV_NUM_DATA_POINTERS] = {0};
indata[0] = (uint8_t *)data;
int len = swr_convert(
asc, pcm->data, pcm->nb_samples, //output: destination buffers and max samples per channel
indata, pcm->nb_samples);
// qDebug() << "swr_convert = " << len;
if (len <= 0)
{
return NULL;
}
return pcm;
}
int AddStream(AVCodecContext *c)
{
if (!c) return -1;
//b. add a stream to the muxer
AVStream *st = avformat_new_stream(ic, NULL);
if (!st)
{
cout << "avformat_new_stream failed" << endl;
return -1;
}
st->codecpar->codec_tag = 0;
//copy the parameters from the encoder context
avcodec_parameters_from_context(st->codecpar, c);
av_dump_format(ic, 0, outUrl.c_str(), 1);
if (c->codec_type == AVMEDIA_TYPE_VIDEO)
{
vc = c;
vs = st;
}
else if (c->codec_type == AVMEDIA_TYPE_AUDIO)
{
ac = c;
as = st;
}
return st->index;
}
bool SendHead()
{
std::cout << "start SendHead " << std::endl;
//open the RTMP network output IO
int ret = avio_open(&ic->pb, outUrl.c_str(), AVIO_FLAG_WRITE);
if (ret != 0)
{
char buf[1024] = {0};
av_strerror(ret, buf, sizeof(buf) - 1);
cout << "avio_open failed " << buf << endl;
return false;
}
//write the muxer header
ret = avformat_write_header(ic, NULL);
if (ret != 0)
{
char buf[1024] = {0};
av_strerror(ret, buf, sizeof(buf) - 1);
cout << "avformat_write_header failed " << buf << endl;
return false;
}
return true;
}
bool SendFrame(AVPacket *pkt, long long pts, int streamIndex)
{
if (!pkt || pkt->size <= 0 || !pkt->data)
{
std::cout << "pkt is NULL" << flush;
return false;
}
pkt->pts = pts;
pkt->stream_index = streamIndex;
AVRational stime;
AVRational dtime;
//determine whether this packet belongs to the video or the audio stream
std::cout << "vc = " << vc << " vs = " << vs << " pkt->stream_index = " << pkt->stream_index << std::endl;
std::cout << "ac = " << ac << " as = " << as << " pkt->stream_index = " << pkt->stream_index << std::endl;
if (vc && vs && pkt->stream_index == vs->index)
{
stime = vc->time_base;
dtime = vs->time_base;
}
else if (ac && as && pkt->stream_index == as->index)
{
stime = ac->time_base;
dtime = as->time_base;
}
else
{
return false;
}
//rescale timestamps to the stream time base and push
pkt->pts = av_rescale_q(pkt->pts, stime, dtime);
pkt->dts = av_rescale_q(pkt->dts, stime, dtime);
pkt->duration = av_rescale_q(pkt->duration, stime, dtime);
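//worked example: the video encoder time base is {1, 1000000} (microseconds) while the
//FLV muxer uses a stream time base of {1, 1000} (milliseconds), so a pts of
//1000000 us is rescaled to 1000 here.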
if (vc && vs && pkt->stream_index == vs->index)
{
std::cout << "vpkt->pts = " << pkt->pts << " vpkt->dts = " << pkt->dts << "vpkt->duration = " << pkt->duration << std::endl;
std::cout << "vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv"<< std::endl;
}
else if (ac && as && pkt->stream_index == as->index)
{
std::cout << "apkt->pts = " << pkt->pts << " apkt->dts = " << pkt->dts << "apkt->duration = " << pkt->duration << std::endl;
std::cout << "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"<< std::endl;
}
int ret = av_interleaved_write_frame(ic, pkt);
if (ret == 0)
{
cout << "#" << flush;
}
return ret == 0;
}
private:
bool OpenCodec(AVCodecContext *c)
{
//open the encoder (used for both audio and video)
int ret = avcodec_open2(c, 0, 0);
if (ret != 0)
{
char err[1024] = {0};
av_strerror(ret, err, sizeof(err) - 1);
cout << err << endl;
avcodec_free_context(&c);
cout << "avcodec_open2 failed!" << endl;
return false;
}
cout << "avcodec_open2 success!" << endl;
return true;
}
bool CreateAudioCodec(AVCodecID cid)
{
//bytes of one audio frame read at a time
///4 initialize the audio encoder
AVCodec *codec = avcodec_find_encoder(cid);
if (!codec)
{
cout << "avcodec_find_encoder failed!" << endl;
return false;
}
ac = avcodec_alloc_context3(codec);
if (!ac)
{
cout << "avcodec_alloc_context3 cid failed!" << endl;
return false;
}
cout << "avcodec_alloc_context3 success!" << endl;
ac->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
ac->thread_count = 4;
return true;
}
bool CreateVideoCodec(AVCodecID cid)
{
///4 initialize the video encoder
AVCodec *codec = avcodec_find_encoder(cid);
if (!codec)
{
cout << "avcodec_find_encoder failed!" << endl;
return false;
}
vc = avcodec_alloc_context3(codec);
if (!vc)
{
cout << "avcodec_alloc_context3 cid failed!" << endl;
return false;
}
cout << "avcodec_alloc_context3 success!" << endl;
vc->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
vc->thread_count = 4;
return true;
}
SwsContext *vsc = NULL; //pixel-format conversion context
SwrContext *asc = NULL; //audio resampling context
AVFrame *yuv = NULL;
AVFrame *pcm = NULL; //resampled pcm output frame
AVPacket vpacket = {0}; //encoded video packet
AVPacket apacket = {0}; //encoded audio packet
AVCodec *codec = NULL; //encoder found by avcodec_find_encoder
int vpts = 0;
int apts = 0;
};
XMediaEncode *XMediaEncode::Get(unsigned char index)
{
static bool isFirst = true;
if (isFirst)
{
//register all codecs
avcodec_register_all();
isFirst = false;
}
static CXMediaEncode cxm[255];
return &cxm[index];
}
XMediaEncode::XMediaEncode()
{
}
XMediaEncode::~XMediaEncode()
{
}
AudioRecordWorker.h
#pragma once
#include <QObject>
#include <QString>
#include <QAudioFormat>
#include <QAudioDevice>
#include <QAudioSource>
#include <QIODevice>
#include <string>
#include "Rtmp.h"
class AudioRecordWorker : public QObject
{
Q_OBJECT
public:
explicit AudioRecordWorker(QObject *parent = nullptr);
~AudioRecordWorker();
void mediaEncodeInit();
void startRecord();
void recordParameter(int sampleRate, int channels, int sampleByte , int nbSample);
public slots:
void doSomething(const QString &cmd);
signals:
void resultNotify(const QString &des);
private:
QAudioFormat format;
QAudioDevice info;
QAudioSource *audio = nullptr;
QIODevice *io = nullptr;
std::string outUrl = "rtmp://0.0.0.0/live";
Rtmp *rtmp = nullptr;
int sampleRate = 44100;
int channels = 2;
int sampleByte = 2;
int nbSample = 1024;
};
AudioRecordWorker.cpp
#include <QDebug>
#include <QThread>
#include <QMediaDevices>
#include <iostream>
#include "AudioRecordWorker.h"
#include "VideoRecordWorker.h"
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
}
extern "C"
{
#include <libavutil/time.h>
}
using namespace std;
AudioRecordWorker::AudioRecordWorker(QObject *parent)
: QObject(parent)
{
//1. start Qt audio capture
format.setSampleRate(sampleRate);
format.setChannelCount(channels);
format.setSampleFormat(QAudioFormat::Int16);
info = QMediaDevices::defaultAudioInput();
if (!info.isFormatSupported(format))
{
qWarning() << "Default format not supported, trying to use the nearest.";
}
audio = new QAudioSource(format, this);
qDebug() << "AudioRecordWorker()" << "thread:" << QThread::currentThreadId();
//start recording audio
io = audio->start();
}
AudioRecordWorker::~AudioRecordWorker()
{
qDebug() << "~AudioRecordWorker()" << "thread:" << QThread::currentThreadId();
}
void AudioRecordWorker::doSomething(const QString &cmd)
{
qDebug() << "doSomething()" << cmd << "thread:" << QThread::currentThreadId();
startRecord();
emit resultNotify("doSomething ok!");
}
void AudioRecordWorker::startRecord()
{
rtmp = Rtmp::Get(0);
// rtmp->rtmp_init();
int frameSize = rtmp->nbSample * channels * sampleByte;
char *buf = new char[frameSize];
qDebug() << "size = " << frameSize;
long long beginTime = av_gettime();
for (;;)
{
qint64 rev_len = io->bytesAvailable();
//std::cout << rev_len << std::endl;
//read one full audio frame at a time
if (rev_len < frameSize)
{
QThread::msleep(1);
continue;
}
int size = 0;
while (size != frameSize)
{
int len = io->read(buf + size, frameSize - size);
size += len;
}
if (size != frameSize) continue;
//one full frame of source data has been read
//cout << size << " " << flush;
//resample the source data
AVFrame *pcm = rtmp->Resample(buf);
if (!pcm)
{
std::cout << "pcm == NULL" << flush;
continue;
}
//pts calculation
//nb_samples / sample_rate = duration of one audio frame in seconds
//timebase pts = seconds * timebase.den
long long pts = av_gettime() - beginTime;
pcm->pts = pts;
AVPacket *pkt = rtmp->EncodeAudio(pcm);
if (!pkt) continue;
//cout << pkt->size << " " << flush;
//push the packet
// std::cout << "SendFrame" << std::endl;
std::cout << "audio_pts = " << pts << std::endl;
rtmp->SendFrame(pkt, pts, rtmp->aindex);
}
delete[] buf;
}
void AudioRecordWorker::recordParameter(int sampleRate, int channels, int sampleByte , int nbSample)
{
this->sampleRate = sampleRate;
this->channels = channels;
this->sampleByte = sampleByte;
this->nbSample = nbSample;
}
CMakeLists.txt
cmake_minimum_required(VERSION 3.16)
project(opencv_example_project)
set(CMAKE_INCLUDE_CURRENT_DIR ON)
set(CMAKE_AUTOMOC ON)
set(CMAKE_AUTORCC ON)
set(CMAKE_AUTOUIC ON)
set(CMAKE_BUILD_TYPE "Debug")
set(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -O0 -Wall -g2 -ggdb")
set(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O3 -Wall")
find_package(OpenCV REQUIRED)
message(STATUS "OpenCV library status:")
message(STATUS " config: ${OpenCV_DIR}")
message(STATUS " version: ${OpenCV_VERSION}")
message(STATUS " libraries: ${OpenCV_LIBS}")
message(STATUS " include path: ${OpenCV_INCLUDE_DIRS}")
find_library(AVCODEC_LIBRARY avcodec)
find_library(AVFORMAT_LIBRARY avformat)
find_library(AVUTIL_LIBRARY avutil)
find_library(AVDEVICE_LIBRARY avdevice)
find_package(Qt6 REQUIRED COMPONENTS Core Gui Multimedia Widgets)
add_executable(qt_audio_rtmp Rtmp.cpp VideoRecordWorker.cpp XMediaEncode.cpp Controller.cpp main.cpp AudioRecordWorker.cpp)
target_link_libraries(qt_audio_rtmp PRIVATE
${OpenCV_LIBS}
Qt::Core
Qt::Gui
Qt::Multimedia
Qt::Widgets
pthread
swresample
m
swscale
avformat
avcodec
avutil
avfilter
avdevice
postproc
z
lzma
rt)