The stream is published directly with ffmpeg to an EasyDarwin server; the audio is taken from the PC microphone. The ffmpeg command is:
ffmpeg -f dshow -i audio="麦克风 (Realtek® Audio)" -codec:a aac -ac 2 -ar 16000 -f rtsp rtsp://10.1.3.170:554/3_a.sdp
How to set up the publishing side itself is left for you to look up.
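If you are not sure what device name dshow expects, ffmpeg can first list the local capture devices; the exact name it prints is what goes after audio= in the command above:
ffmpeg -list_devices true -f dshow -i dummy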
#include "playvoiceplayer.h"
#include <QDebug>
PlayVoicePlayer::PlayVoicePlayer(QObject *parent) : QThread(parent)
{
}
void PlayVoicePlayer::startPlay(QString url)
{
qDebug() << "Video2PCM::startPlay()";
playUrl = url;
unGetStream = true;
this->start();
}
void PlayVoicePlayer::run()
{
qDebug() << "Video2PCM::run():"<<playUrl;
isStart = true;
AVFormatContext *pFormatCtx = NULL;
AVCodecContext *pCodecCtx = NULL;
AVCodec *pCodec = NULL;
AVPacket packet;
AVFrame *pAudioFrame = NULL;
uint8_t *buffer = NULL;
struct SwrContext *audio_convert_ctx = NULL;
int got_picture;
int audioIndex;
int out_buffer_size;
av_register_all();
if (avformat_open_input(&pFormatCtx, playUrl.toStdString().data(), NULL, NULL) != 0)
{
emit getPcmStreamStop();
qDebug()<< " Video2PCM Couldn't open an input stream.";
return;
}
pFormatCtx->probesize = 5 * 1024; // with 1000*1024 the delay before playback starts is about 2 seconds; for 1920*1080 streams this value has not caused any crashes so far
pFormatCtx->max_analyze_duration = 1 * AV_TIME_BASE;
if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
{
emit getPcmStreamStop();
qDebug()<< "Video2PCM Couldn't find stream information.";
return;
}
audioIndex = -1;
for (int i = 0; i < pFormatCtx->nb_streams; i++)
{
if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
{
audioIndex = i;
break;
}
}
if (audioIndex == -1)
{
emit getPcmStreamStop();
qDebug()<< "Video2PCM Couldn't find a audio stream.";
return;
}
pCodecCtx = pFormatCtx->streams[audioIndex]->codec;
pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if (pCodec == NULL) { emit getPcmStreamStop(); qDebug() << "Video2PCM Codec not found."; return; }
if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
{
emit getPcmStreamStop();
qDebug()<< "Video2PCM Could not open codec.";
return;
}
pAudioFrame = av_frame_alloc();
if (pAudioFrame == NULL)
{
emit getPcmStreamStop();
qDebug()<< "Video2PCM Could not alloc AVFrame";
return;
}
//audio output parameters
uint64_t out_channel_layout = AV_CH_LAYOUT_STEREO;//channel layout
AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S32;//sample format
int out_nb_samples = pCodecCtx->frame_size;//nb_samples: AAC-1024, MP3-1152
// int out_sample_rate = 44100;//sample rate
int out_sample_rate = 16000;//sample rate
int out_nb_channels = av_get_channel_layout_nb_channels(out_channel_layout);//number of channels for this layout
out_buffer_size = av_samples_get_buffer_size(NULL, out_nb_channels, out_nb_samples, out_sample_fmt, 1);
buffer = (uint8_t *)av_malloc(MAX_AUDIO_FRAME_SIZE);
audio_convert_ctx = swr_alloc();
if (audio_convert_ctx == NULL)
{
emit getPcmStreamStop();
qDebug()<< " Video2PCM Could not allocate SwrContext";
return;
}
swr_alloc_set_opts(audio_convert_ctx, out_channel_layout, out_sample_fmt,out_sample_rate,
pCodecCtx->channel_layout, pCodecCtx->sample_fmt, pCodecCtx->sample_rate, 0, NULL);
swr_init(audio_convert_ctx);
int index = 0;//frame counter
while (isStart)
{
if(av_read_frame(pFormatCtx, &packet)<0)
{
emit getPcmStreamStop();
break;
}
if (packet.stream_index == audioIndex) {
if (avcodec_decode_audio4(pCodecCtx, pAudioFrame, &got_picture, &packet) < 0) {
qDebug() <<("Error in decoding audio frame.\n");
emit getPcmStreamStop();
break;
}
if (got_picture) {
// int dst_nb_samples = av_rescale_rnd(swr_get_delay(audio_convert_ctx, pAudioFrame->sample_rate) + pAudioFrame->nb_samples, pAudioFrame->sample_rate, pAudioFrame->sample_rate, AVRounding(1));
//the third argument of swr_convert is the output capacity in samples per channel (not bytes)
swr_convert(audio_convert_ctx, &buffer, MAX_AUDIO_FRAME_SIZE / (out_nb_channels * av_get_bytes_per_sample(out_sample_fmt)), (const uint8_t **)pAudioFrame->data, pAudioFrame->nb_samples);
if(unGetStream == true)
{
qDebug() << "Video2PCM unGetStream";
unGetStream =false;
emit getAudioStream();
}
// printf("index:%5d\t pts:%lld\t packet size:%d\n", index, packet.pts, packet.size);
//Write PCM
// fwrite(buffer, 1, out_buffer_size, fp_pcm);
emit decodePCM(packet.pts, QByteArray((char*)buffer, out_buffer_size));
index++;
}
}
av_free_packet(&packet);
}
qDebug() << "Video2PCM close1";
swr_free(&audio_convert_ctx);
av_free(buffer);
av_frame_free(&pAudioFrame);
avcodec_close(pCodecCtx);
avformat_close_input(&pFormatCtx);
isStart= false;
}
The corresponding PlayVoicePlayer.h file is as follows:
#ifndef PLAYVOICEPLAYER_H
#define PLAYVOICEPLAYER_H
#include <QThread>
#include <QString>
#ifdef _WINDOWS
extern "C"
{
#include "libavcodec\avcodec.h"
#include "libavformat\avformat.h"
#include "libswresample\swresample.h"
};
#else
extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswresample/swresample.h"
};
#endif
#include <QObject>
#include <QByteArray>
#define MAX_AUDIO_FRAME_SIZE 192000
class PlayVoicePlayer : public QThread
{
Q_OBJECT
public:
explicit PlayVoicePlayer(QObject *parent = nullptr);
void startPlay(QString url);
private:
bool isStart = true;
QString playUrl;
bool unGetStream;
signals:
void getPcmStreamStop();
void getAudioStream();
void decodePCM(qint64 pts, const QByteArray& pcm);
protected:
void run();
};
#endif // PLAYVOICEPLAYER_H
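The decoding loop only exits on its own when av_read_frame() fails. If you need to stop it from the outside, one option (not part of the original code) is to add a public stopPlay() member to PlayVoicePlayer that clears isStart and waits for run() to finish; for strictness isStart could also be made a std::atomic<bool>:
void PlayVoicePlayer::stopPlay()
{
    isStart = false; // the while (isStart) loop in run() exits after the current packet
    this->wait();    // block until run() has cleaned up and returned
}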
The PCM data is handed to the playback side through decodePCM(packet.pts, QByteArray((char*)buffer, out_buffer_size)). On the playback side, a QRingBuffer is defined as the buffer that holds the received audio stream. QRingBuffer is not a public class in the standard library, so you have to add it yourself. A QSemaphore-based buffer is not used here because playback with a QSemaphore buffer turned out to be noisy, so that approach was abandoned; anyone interested can still give QSemaphore another try. The code of qringbuffer.cpp is as follows:
#include "qringbuffer.h"
#include <cstring>
char *QRingBuffer::readPointerAtPosition(qint64 pos, qint64 &length)
{
if (pos >= 0)
{
pos += head;
for (int i = 0; i < buffers.size(); ++i)
{
length = (i == tailBuffer ? tail : buffers[i].size());
if (length > pos)
{
length -= pos;
return buffers[i].data() + pos;
}
pos -= length;
}
}
length = 0;
return 0;
}
void QRingBuffer::free(qint64 bytes)
{
Q_ASSERT(bytes <= bufferSize);
while (bytes > 0)
{
const qint64 blockSize = buffers.first().size() - head;
if (tailBuffer == 0 || blockSize > bytes)
{
if (bufferSize <= bytes)
{
if (buffers.first().size() <= basicBlockSize)
{
bufferSize = 0;
head = tail = 0;
} else
{
clear();
}
}
else
{
Q_ASSERT(bytes < MaxByteArraySize);
head += int(bytes);
bufferSize -= bytes;
}
return;
}
bufferSize -= blockSize;
bytes -= blockSize;
buffers.removeFirst();
--tailBuffer;
head = 0;
}
}
char *QRingBuffer::reserve(qint64 bytes)
{
if (bytes <= 0 || bytes >= MaxByteArraySize)
return 0;
if (buffers.isEmpty())
{
buffers.append(QByteArray());
buffers.first().resize(qMax(basicBlockSize, int(bytes)));
}
else
{
const qint64 newSize = bytes + tail;
//if the new size exceeds the size of the data held in the last buffer, the last buffer has to be reallocated
if (newSize > buffers.last().size())
{
//when the conditions below hold, shrink the last buffer down to the data it currently holds,
//then append a fresh buffer and reset tail (the end position of its data) to 0
if (newSize > buffers.last().capacity() && (tail >= basicBlockSize
|| newSize >= MaxByteArraySize))
{
buffers.last().resize(tail);
buffers.append(QByteArray());
++tailBuffer;
tail = 0;
}
//otherwise just grow the last buffer
buffers.last().resize(qMax(basicBlockSize, tail + int(bytes)));
}
}
char *writePtr = buffers.last().data() + tail;
bufferSize += bytes;
Q_ASSERT(bytes < MaxByteArraySize);
tail += int(bytes);
return writePtr;
}
char *QRingBuffer::reserveFront(qint64 bytes)
{
if (bytes <= 0 || bytes >= MaxByteArraySize)
return 0;
if (head < bytes)
{
if (buffers.isEmpty())
{
buffers.append(QByteArray());
}
else
{
buffers.first().remove(0, head);
if (tailBuffer == 0)
tail -= head;
}
head = qMax(basicBlockSize, int(bytes));
if (bufferSize == 0)
{
tail = head;
}
else
{
buffers.prepend(QByteArray());
++tailBuffer;
}
buffers.first().resize(head);
}
head -= int(bytes);
bufferSize += bytes;
return buffers.first().data() + head;
}
void QRingBuffer::chop(qint64 length)
{
Q_ASSERT(length <= bufferSize);
while (length > 0)
{
if (tailBuffer == 0 || tail > length)
{
if (bufferSize <= length)
{
if (buffers.first().size() <= basicBlockSize)
{
bufferSize = 0;
head = tail = 0;
}
else
{
clear();
}
}
else
{
Q_ASSERT(length < MaxByteArraySize);
tail -= int(length);
bufferSize -= length;
}
return;
}
bufferSize -= tail;
length -= tail;
buffers.removeLast();
--tailBuffer;
tail = buffers.last().size();
}
}
void QRingBuffer::clear()
{
if (buffers.isEmpty())
return;
buffers.erase(buffers.begin() + 1, buffers.end());
buffers.first().clear();
head = tail = 0;
tailBuffer = 0;
bufferSize = 0;
}
qint64 QRingBuffer::indexOf(char c, qint64 maxLength, qint64 pos)
{
if (maxLength <= 0 || pos < 0)
return -1;
qint64 index = -(pos + head);
for (int i = 0; i < buffers.size(); ++i)
{
qint64 nextBlockIndex = qMin(index + (i == tailBuffer ? tail : buffers[i].size()),
maxLength);
if (nextBlockIndex > 0)
{
const char *ptr = buffers[i].data();
if (index < 0)
{
ptr -= index;
index = 0;
}
const char *findPtr = reinterpret_cast<const char *>(memchr(ptr, c,
nextBlockIndex - index));
if (findPtr)
return qint64(findPtr - ptr) + index + pos;
if (nextBlockIndex == maxLength)
return -1;
}
index = nextBlockIndex;
}
return -1;
}
qint64 QRingBuffer::read(char *data, qint64 maxLength)
{
const qint64 bytesToRead = qMin(bufferSize, maxLength);
qint64 readSoFar = 0;
while (readSoFar < bytesToRead)
{
const qint64 bytesToReadFromThisBlock = qMin(bytesToRead - readSoFar,
nextDataBlockSize());
if (data)
memcpy(data + readSoFar, readPointer(), bytesToReadFromThisBlock);
readSoFar += bytesToReadFromThisBlock;
free(bytesToReadFromThisBlock);
}
return readSoFar;
}
QByteArray QRingBuffer::read()
{
if (bufferSize == 0)
return QByteArray();
QByteArray qba(buffers.takeFirst());
//avoid an unnecessary reallocation when resizing, keeping the QByteArray efficient
qba.reserve(0);
if (tailBuffer == 0)
{
qba.resize(tail);
tail = 0;
} else
{
--tailBuffer;
}
qba.remove(0, head);
head = 0;
bufferSize -= qba.size();
return qba;
}
qint64 QRingBuffer::peek(char *data, qint64 maxLength, qint64 pos)
{
qint64 readSoFar = 0;
if (pos >= 0)
{
pos += head;
for (int i = 0; readSoFar < maxLength && i < buffers.size(); ++i)
{
qint64 blockLength = (i == tailBuffer ? tail : buffers[i].size());
if (pos < blockLength)
{
blockLength = qMin(blockLength - pos, maxLength - readSoFar);
memcpy(data + readSoFar, buffers[i].data() + pos, blockLength);
readSoFar += blockLength;
pos = 0;
}
else
{
pos -= blockLength;
}
}
}
return readSoFar;
}
void QRingBuffer::append(const char *data, qint64 size)
{
char *writePointer = reserve(size);
if (size == 1)
*writePointer = *data;
else if (size)
::memcpy(writePointer, data, size);
}
void QRingBuffer::append(const QByteArray &qba)
{
if (tail == 0)
{
if (buffers.isEmpty())
buffers.append(qba);
else
buffers.last() = qba;
}
else
{
buffers.last().resize(tail);
buffers.append(qba);
++tailBuffer;
}
tail = qba.size();
bufferSize += tail;
}
qint64 QRingBuffer::readLine(char *data, qint64 maxLength)
{
if (!data || --maxLength <= 0)
return -1;
qint64 i = indexOf('\n', maxLength);
i = read(data, i >= 0 ? (i+1) : maxLength);
data[i] = '\0';
return i;
}
The corresponding QRingBuffer.h file is as follows:
#ifndef QRINGBUFFER_P_H
#define QRINGBUFFER_P_H
#include <QList>
#include <QByteArray>
#include <limits>
#ifndef QRINGBUFFER_CHUNKSIZE
#define QRINGBUFFER_CHUNKSIZE 4096
#endif
enum
{
//1 GB minus 1 byte
MaxAllocSize = (1 << (std::numeric_limits<int>::digits - 1)) - 1
};
enum
{
//1 GB minus 1 byte, minus another 16 bytes
MaxByteArraySize = MaxAllocSize - 16
};
class QRingBuffer
{
public:
//by default each buffer block is QRINGBUFFER_CHUNKSIZE bytes
QRingBuffer(int growth = QRINGBUFFER_CHUNKSIZE) :
head(0), tail(0), tailBuffer(0), basicBlockSize(growth), bufferSize(0) { }
~QRingBuffer(){}
//get a pointer to the given position inside the ring buffer
//length is set to the number of bytes from that position to the end of the block
char *readPointerAtPosition(qint64 pos, qint64 &length);
//reserve space at the tail; returns a pointer to the new space
char *reserve(qint64 bytes);
//reserve space at the head; returns a pointer to the new space
char *reserveFront(qint64 bytes);
//shrink the buffer to pos bytes
void truncate(qint64 pos)
{
if (pos < bufferSize)
chop(bufferSize - pos);
}
//check whether the buffer holds no data
bool isEmpty()
{
return bufferSize == 0;
}
//read one character from the head and return it converted to int
int getChar()
{
if (isEmpty())
return -1;
char c = *readPointer();
free(1);
return int(uchar(c));
}
//append a character at the tail of the buffer
void putChar(char c)
{
char *ptr = reserve(1);
*ptr = c;
}
//push a character back at the head of the buffer
void ungetChar(char c)
{
if (head > 0) {
--head;
buffers.first()[head] = c;
++bufferSize;
} else {
char *ptr = reserveFront(1);
*ptr = c;
}
}
//clear the buffer
void clear();
//read up to maxLength bytes into data; if the buffers hold less than maxLength, read everything available,
//returns the number of bytes actually read
qint64 read(char *data, qint64 maxLength);
//take the first buffer block out of buffers
QByteArray read();
//copy up to maxLength bytes starting at position pos into data
//returns the number of bytes actually copied
qint64 peek(char *data, qint64 maxLength, qint64 pos = 0);
//append raw data by extending the last buffer
void append(const char *data, qint64 size);
//append a QByteArray as a new buffer at the end
void append(const QByteArray &qba);
//release length bytes from the head; normally used together with reserve
qint64 skip(qint64 length)
{
qint64 bytesToSkip = qMin(length, bufferSize);
free(bytesToSkip);
return bytesToSkip;
}
//release length bytes from the tail; normally used together with reserve
void chop(qint64 length);
//read one line, including its terminating '\n'
qint64 readLine(char *data, qint64 maxLength);
bool canReadLine()
{
return indexOf('\n', bufferSize) >= 0;
}
private:
//size of the next data block:
//if only one buffer is left, the size of the data held in the last buffer; otherwise the size of the data held in the first buffer
qint64 nextDataBlockSize()
{
return (tailBuffer == 0 ? tail : buffers.first().size()) - head;
}
//pointer to the first valid byte in the buffer
char *readPointer()
{
return bufferSize == 0 ? Q_NULLPTR : (buffers.first().data() + head);
}
qint64 indexOf(char c, qint64 maxLength, qint64 pos = 0);
//free space from the head
void free(qint64 bytes);
private:
QList<QByteArray> buffers;
//start position of the data in the first buffer and end position of the data in the last buffer
int head, tail;
//equals buffers.size() - 1; if it is 0, only one buffer is left
int tailBuffer;
//size of the initial allocation for each block
int basicBlockSize;
//total amount of data held in buffers
qint64 bufferSize;
};
#endif // QRINGBUFFER_P_H
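For reference, this is roughly how the ring buffer behaves on its own (a standalone sketch, not one of the project files): one side appends QByteArray chunks, the other side drains whatever is currently buffered.
#include "qringbuffer.h"
#include <QByteArray>
#include <QDebug>
int main()
{
    QRingBuffer rb;                    // blocks of QRINGBUFFER_CHUNKSIZE bytes by default
    rb.append(QByteArray(4096, 0x7f)); // producer side: append a decoded PCM chunk
    rb.append(QByteArray(1024, 0x00));
    char out[2048];
    while (!rb.isEmpty())
    {
        qint64 n = rb.read(out, sizeof(out)); // consumer side: drain in fixed-size reads
        qDebug() << "read" << n << "bytes";
    }
    return 0;
}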
In addition there is MyDevice, a class derived from QIODevice. Its job is the pull side of playback: whenever the audio output needs data it calls back into readData(), and readData() has to fill in the requested bytes. Note that you must not simply write data into the QIODevice the moment it arrives; writing to the device as soon as data is received causes noisy playback, so the audio side is only given as much data as it asks for. The code of mydevice.cpp is as follows:
#include "mydevice.h"
#include <QDebug>
#include "qringbuffer.h"
#include <QMutex>
QRingBuffer ringBuffer;
QMutex audioMutex;
MyDevice::MyDevice(void)
{
this->open(QIODevice::ReadWrite); // opened here to avoid "QIODevice::read (QIODevice): device not open."
}
MyDevice::~MyDevice()
{
this->close();
}
// data is the address of the sound card's data buffer; maxlen is the maximum number of bytes that buffer can hold.
qint64 MyDevice::readData(char *data, qint64 maxlen)
{
int getNum=0;
qDebug()<< "maxlen="<<maxlen;
if(maxlen != 0)
{
audioMutex.lock();
getNum= ringBuffer.read(data,maxlen);
audioMutex.unlock();
static long readbufdata=0;
readbufdata+= getNum;
qDebug()<< "音频接收到的readbufdata="<<readbufdata;
}
static long readData = 0;
readData += maxlen;
qDebug()<< "音频需要数据共计read data"<<readData;
qDebug()<< "getNum="<<getNum;
return getNum;
}
qint64 MyDevice::writeData(const char *data, qint64 len)
{
return len;
}
void MyDevice::setData(QByteArray audiodata)
{
audioMutex.lock();
ringBuffer.append(audiodata);
audioMutex.unlock();
}
The corresponding MyDevice.h file is as follows:
#ifndef MYDEVICE_H
#define MYDEVICE_H
#include <QIODevice>
#include <QByteArray>
#include <QObject>
#include <QDebug>
#include "QSemaphore"
class MyDevice : public QIODevice
{
private:
public:
MyDevice(); //object that receives the PCM data to be played
~MyDevice();
void setData(QByteArray setData);
qint64 readData(char *data, qint64 maxlen); //reimplemented virtual function
qint64 writeData(const char *data, qint64 len); //pure virtual in QIODevice, so it has to be implemented
};
#endif // MYDEVICE_H
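If you still hear glitches when the ring buffer briefly runs dry, one thing worth trying (not part of the original code) is to pad the remainder of each read with silence instead of returning fewer bytes than requested, so QAudioOutput never underruns. A minimal sketch of readData() with that change, assuming <cstring> is included for memset:
qint64 MyDevice::readData(char *data, qint64 maxlen)
{
    audioMutex.lock();
    qint64 got = ringBuffer.read(data, maxlen);
    audioMutex.unlock();
    if (got < maxlen)
        memset(data + got, 0, maxlen - got); // zero samples are silence for signed PCM
    return maxlen;
}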
What remains is the main window, which connects to the received audio stream and writes it to the sound card:
#include "mainwindow.h"
#include "ui_mainwindow.h"
#include "playvoiceplayer.h"
#include <QAudioOutput>
#include <QAudioFormat>
#include <QFile>
#include <QDebug>
#include <QIODevice>
#include "mydevice.h"
MainWindow::MainWindow(QWidget *parent)
: QMainWindow(parent)
, ui(new Ui::MainWindow)
{
ui->setupUi(this);
// QFile inputFile;
// inputFile.setFileName("test.pcm");
// inputFile.open(QIODevice::ReadOnly);
//set the sample format
// QAudioFormat audioFormat;
// //set the sample rate
// audioFormat.setSampleRate(44100);
// //set the channel count
// audioFormat.setChannelCount(2);
// //set the sample size, usually 8 or 16 bits
// audioFormat.setSampleSize(16);
// //set the codec
// audioFormat.setCodec("audio/pcm");
// //set the byte order
// audioFormat.setByteOrder(QAudioFormat::LittleEndian);
// //set the sample type
// audioFormat.setSampleType(QAudioFormat::UnSignedInt);
// QAudioOutput *audio = new QAudioOutput( audioFormat, 0);
// audio->start(&inputFile);
fmt.setSampleRate(16000);
fmt.setChannelCount(2);
fmt.setSampleSize(32);
fmt.setByteOrder(QAudioFormat::LittleEndian); //set the byte order
fmt.setCodec("audio/pcm");
fmt.setSampleType(QAudioFormat::SignedInt); //set the sample type
audioOutput = new QAudioOutput(fmt);
// streamOut = audioOutput->start();
// int size = audioOutput->periodSize();
// qDebug()<< "size ="<
PlayVoicePlayer *voicePlay = new PlayVoicePlayer();
connect(voicePlay,&PlayVoicePlayer::decodePCM,this,[=](qint64 pts, const QByteArray& pcm){
static bool beginflag = false;
if(beginflag== false)
{
beginflag= true;
dev = new MyDevice();
audioOutput->start(dev);
connect(dev,&QIODevice::readyRead,this,[=](){
qDebug()<< "readOver";
});
}
static long getdata =0;
getdata += pcm.size();
qDebug()<< "get data"<<pcm.size();
qDebug()<< "get data counter"<<getdata;
dev->setData(pcm);
// streamOut->write(pcm);
QFile file("test.pcm");
file.open(QIODevice::WriteOnly | QIODevice::Append);
file.write(pcm);
file.close();
});
voicePlay->startPlay("rtsp://localhost/3_a.sdp");
// QFile *inputFile= new QFile("test.pcm");
// inputFile->open(QIODevice::ReadOnly);
// audioOutput->start(inputFile);
}
MainWindow::~MainWindow()
{
delete ui;
}
Note: this program still has a few bugs; for example, reconnecting after the stream drops is not handled yet (a possible hook for that is sketched below) and will be finished when there is time. This is only one way to solve the problem: you could also use SDL for playback, but here Qt's own audio classes are used to play the audio stream instead of SDL.
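For the dropped-stream case, one simple approach (an untested sketch, not part of the original code; requires #include <QTimer>) is to listen for getPcmStreamStop in the MainWindow constructor and restart the player after a short delay:
connect(voicePlay, &PlayVoicePlayer::getPcmStreamStop, this, [=]() {
    qDebug() << "stream stopped, retrying in 3 s";
    QTimer::singleShot(3000, this, [=]() {
        voicePlay->startPlay("rtsp://localhost/3_a.sdp"); // by the time the timer fires, run() has finished and the thread can be restarted
    });
});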
The source code can be downloaded from:
https://download.csdn.net/download/heguobo111/20545333
Note: it was built with the MSVC2017 64-bit toolchain; if you need a different build environment, replace the FFmpeg libraries accordingly yourself.
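For reference, the FFmpeg and Qt Multimedia dependencies in the .pro file look roughly like this; the paths are placeholders for wherever your MSVC build of FFmpeg lives, and the library names may differ between FFmpeg versions:
QT += core gui multimedia

INCLUDEPATH += $$PWD/ffmpeg/include
LIBS += -L$$PWD/ffmpeg/lib -lavcodec -lavformat -lavutil -lswresample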