Real-time audio and video capture and encoding with FFmpeg

Table of Contents

  • Real-time audio and video capture and encoding with FFmpeg
    • 1. Platform
    • 2. Background
    • 3. Data format definitions
    • 4. Core code

1. Platform

Windows 10, FFmpeg library version 4.3

2. Background

A device captures video and audio data in real time.

Audio data

Sample rate: changes dynamically
Sample count: not fixed
Format: AV_SAMPLE_FMT_S16

Video data

Pixel format: RGB32
Size: 1200×1200

3. Data format definitions

Audio data format

typedef struct _SAUDIO_DATA
{
    int nSampleRate;    // sample rate in Hz
    int nSamples;       // number of samples
    BYTE* pBufferData;  // audio data buffer
    int nBufferSize;    // audio buffer size in bytes
} SAUDIODATA, *PSAUDIODATA;

Video data format

typedef struct _SVIDEO_DATA
{
    BYTE* pBufferData;  // video data buffer
    int nBufferSize;    // video buffer size in bytes
} SVIDEODATA, *PSVIDEODATA;

Output stream wrapper

typedef struct OutPutStream {
	AVStream* st;
	AVCodecContext* enc;

	/* pts of the next frame that will be generated */
	int64_t next_pts;
	int samples_count;

	AVFrame* frame;
	AVFrame* tmp_frame;

	AVPacket* tmp_pkt;

	struct SwsContext* sws_ctx;
	struct SwrContext* swr_ctx;
}OutPutStream;
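
The frame, tmp_frame, sws_ctx and swr_ctx members are filled in when the encoders are opened; that code is not listed in this post. A minimal sketch of the video side of such an OpenVideo-style helper, modelled on FFmpeg's muxing.c example (names and error handling here are illustrative only), could look like this:

static bool OpenVideoSketch(OutPutStream* ost)
{
	AVCodecContext* c = ost->enc;

	// Destination frame in the encoder's pixel format (e.g. YUV420P)
	ost->frame = av_frame_alloc();
	ost->frame->format = c->pix_fmt;
	ost->frame->width = c->width;
	ost->frame->height = c->height;
	if (av_frame_get_buffer(ost->frame, 0) < 0)
		return false;

	// tmp_frame only carries pointers set later by av_image_fill_arrays(),
	// so it needs no buffer of its own
	ost->tmp_frame = av_frame_alloc();

	// Converter from the captured RGB32 image to the encoder pixel format
	ost->sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_RGB32,
		c->width, c->height, c->pix_fmt,
		SWS_BICUBIC, nullptr, nullptr, nullptr);
	return ost->sws_ctx != nullptr;
}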

4. Core code

Creating the media file

bool CMyEncoder::CreateMediaFile(LPCTSTR lpstrFilename)
{
	int ret = -1;
	m_bHaveVideo = false;
	m_bEncodeVideo = false;
	m_bHaveAudio = false;
	m_bEncodeAudio = false;
	avformat_alloc_output_context2(&m_pOutFmtCtx, nullptr, nullptr, CW2A(lpstrFilename));
	if (!m_pOutFmtCtx)
	{
		avformat_alloc_output_context2(&m_pOutFmtCtx, NULL, "avi", CW2A(lpstrFilename));
	}

	if (!m_pOutFmtCtx)
		return false;

	m_pOutFmt = m_pOutFmtCtx->oformat;

	if (m_pOutFmt->video_codec != AV_CODEC_ID_NONE)
	{
		// Add a video stream to the output wrapper
		AddStream(&m_stVideo, m_pOutFmtCtx, &m_pVideoCodec, m_pOutFmt->video_codec);
		m_bHaveVideo = true;
		m_bEncodeVideo = true;
	}

	if (m_pOutFmt->audio_codec != AV_CODEC_ID_NONE)
	{
		// Add an audio stream (AAC) to the output wrapper
		AddStream(&m_stAudio, m_pOutFmtCtx, &m_pAudioCodec, AV_CODEC_ID_AAC);
		m_bHaveAudio = true;
		m_bEncodeAudio = true;
	}

	// Open the encoders
	if (m_bHaveVideo)
		OpenVideo(m_pOutFmtCtx, m_pVideoCodec, &m_stVideo, nullptr);
	if (m_bHaveAudio)
		OpenAudio(m_pOutFmtCtx, m_pAudioCodec, &m_stAudio, nullptr);

	// Open the output file
	if (!(m_pOutFmt->flags & AVFMT_NOFILE))
	{
		ret = avio_open(&m_pOutFmtCtx->pb, CW2A(lpstrFilename), AVIO_FLAG_WRITE);
		if (ret < 0)
		{
			return false;
		}
	}

	// Write the container header
	ret = avformat_write_header(m_pOutFmtCtx, nullptr);
	if (ret < 0)
		return false;

	return true;
}
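
AddStream() used above is the author's helper and is not listed in the post. A hedged sketch of what it might do, following add_stream() from the muxing.c example (the bitrates, frame rate, GOP size and audio sample rate below are assumptions):

void CMyEncoder::AddStream(OutPutStream* ost, AVFormatContext* oc, AVCodec** codec, enum AVCodecID codec_id)
{
	*codec = avcodec_find_encoder(codec_id);
	ost->tmp_pkt = av_packet_alloc();
	ost->st = avformat_new_stream(oc, nullptr);
	ost->st->id = oc->nb_streams - 1;

	AVCodecContext* c = avcodec_alloc_context3(*codec);
	ost->enc = c;

	switch ((*codec)->type)
	{
	case AVMEDIA_TYPE_AUDIO:
		c->sample_fmt = (*codec)->sample_fmts ? (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
		c->bit_rate = 64000;
		c->sample_rate = 44100;                 // assumed encoder rate
		c->channel_layout = AV_CH_LAYOUT_STEREO;
		c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
		ost->st->time_base = { 1, c->sample_rate };
		c->time_base = ost->st->time_base;
		break;
	case AVMEDIA_TYPE_VIDEO:
		c->codec_id = codec_id;
		c->bit_rate = 4000000;
		c->width = 1200;                        // matches the capture size in this post
		c->height = 1200;
		ost->st->time_base = { 1, 25 };         // assumed 25 fps
		c->time_base = ost->st->time_base;
		c->gop_size = 12;
		c->pix_fmt = AV_PIX_FMT_YUV420P;
		break;
	default:
		break;
	}

	// Some muxers require global headers in the stream extradata
	if (oc->oformat->flags & AVFMT_GLOBALHEADER)
		c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}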

Processing a video frame

bool CMyEncoder::ProcImageFrame(PVOID pImageData, __int64 nSize, int nIndex)
{
	bool bRet = false;
	AVFrame* pSrcFrame = nullptr;
	pSrcFrame = m_stVideo.tmp_frame;

	// Clock sync: only write video when it is not ahead of audio
	if (av_compare_ts(m_stVideo.next_pts, m_stVideo.enc->time_base, m_stAudio.next_pts, m_stAudio.enc->time_base) <= 0)
	{
		// Wrap the raw RGB32 buffer in the temporary AVFrame
		av_image_fill_arrays(pSrcFrame->data, pSrcFrame->linesize, (const uint8_t*)pImageData, AV_PIX_FMT_RGB32, m_stVideo.enc->width, m_stVideo.enc->height, 1);

		// Convert the raw frame to the encoder's pixel format
		sws_scale(m_stVideo.sws_ctx, (const uint8_t * const*)pSrcFrame->data, pSrcFrame->linesize, 0, m_stVideo.enc->height, m_stVideo.frame->data, m_stVideo.frame->linesize);

		// Advance the video PTS
		if (m_stVideo.frame)
			m_stVideo.frame->pts = m_stVideo.next_pts++;
        
		// Encode and write the frame
		bRet = WriteFrame(m_pOutFmtCtx, m_stVideo.enc, m_stVideo.st, m_stVideo.frame, m_stVideo.tmp_pkt);
	}

	return bRet;
}
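
WriteFrame() is shared by the video, audio and silence paths but is not shown in the original post. A sketch following the send/receive pattern of write_frame() in the muxing.c example (assumed behaviour, not the author's exact code):

bool CMyEncoder::WriteFrame(AVFormatContext* fmt_ctx, AVCodecContext* c, AVStream* st, AVFrame* frame, AVPacket* pkt)
{
	// Send the frame to the encoder (frame may be nullptr to flush)
	int ret = avcodec_send_frame(c, frame);
	if (ret < 0)
		return false;

	while (ret >= 0)
	{
		ret = avcodec_receive_packet(c, pkt);
		if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
			break;
		else if (ret < 0)
			return false;

		// Rescale packet timestamps from codec time base to stream time base
		av_packet_rescale_ts(pkt, c->time_base, st->time_base);
		pkt->stream_index = st->index;

		// Interleaved write; this consumes the packet contents
		if (av_interleaved_write_frame(fmt_ctx, pkt) < 0)
			return false;
	}
	return true;
}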

Processing an audio frame

bool CMyEncoder::ProcAudioFrame(int nSampleRate, int nSampleCount, PVOID pAudioData, __int64 nSize)
{
	if (!pAudioData)
		return false;

	CString strInfo;
	int ret = 0, dst_nb_samples = 0, max_dst_nb_samples = 0, nLineSize = 0, nBufferSize = 0;
	AVCodecContext* audioCodecCtx = nullptr;
	audioCodecCtx = m_stAudio.enc;

	// Configure the resampler: stereo S16 at the source rate -> encoder layout/format/rate
	swr_alloc_set_opts(m_stAudio.swr_ctx, audioCodecCtx->channel_layout, audioCodecCtx->sample_fmt, audioCodecCtx->sample_rate, AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, nSampleRate, 0, nullptr);
	if (swr_init(m_stAudio.swr_ctx) < 0)
	{
		TRACE(L"初始化音频编码上下文失败\n");
		return false;
	}

	// Current number of source samples
	m_nCurSampeCount = nSampleCount * 2;
    
	// Maximum number of destination samples
	max_dst_nb_samples = dst_nb_samples = av_rescale_rnd(m_nCurSampeCount, audioCodecCtx->sample_rate, nSampleRate, AV_ROUND_UP);
    
	// Destination sample count, including the resampler delay
	dst_nb_samples = av_rescale_rnd(swr_get_delay(m_stAudio.swr_ctx, nSampleRate) + m_nCurSampeCount, audioCodecCtx->sample_rate, nSampleRate, AV_ROUND_UP);
	if (dst_nb_samples > max_dst_nb_samples)
	{
		max_dst_nb_samples = dst_nb_samples;
	}

	// Allocate the destination sample buffer
	uint8_t* pBufferData = nullptr;
	nBufferSize = av_samples_get_buffer_size(&nLineSize, audioCodecCtx->channels, max_dst_nb_samples, audioCodecCtx->sample_fmt, 0);
	pBufferData = (uint8_t*)malloc(nBufferSize);
	memset(pBufferData, 0, nBufferSize);

	// Convert the source samples to the destination format/rate
	ret = swr_convert(m_stAudio.swr_ctx,
		&pBufferData, max_dst_nb_samples,
		(const uint8_t**)&pAudioData, m_nCurSampeCount);
	if (ret < 0)
	{
		free(pBufferData);
		return false;
	}

	// Grow the FIFO and queue only the samples actually produced by swr_convert()
	int cache_Size = av_audio_fifo_size(m_pAudioFifo);
	av_audio_fifo_realloc(m_pAudioFifo, cache_Size + ret);

	av_audio_fifo_write(m_pAudioFifo, (void**)&pBufferData, ret);
	free(pBufferData);

	ProcAudioSamples();

	return true;
}
int CMyEncoder::ProcAudioSamples()
{
	bool bRet = false;
	AVCodecContext* audioCodecCtx = nullptr;
	audioCodecCtx = m_stAudio.enc;
	int codec_need_samples = 0;
	codec_need_samples = audioCodecCtx->frame_size;
	int nSamples = av_audio_fifo_size(m_pAudioFifo);
	int nBufferSize = 0, nLineSize = 0;
	while (nSamples >= codec_need_samples)
	{
		// Clock sync: only write audio when it is not ahead of video
		if (av_compare_ts(m_stAudio.next_pts, audioCodecCtx->time_base, m_stVideo.next_pts, m_stVideo.enc->time_base) <= 0)
		{
			nSamples = codec_need_samples;
			av_audio_fifo_read(m_pAudioFifo, (void**)m_stAudio.frame->data, nSamples);
			AVRational oRational = { 1, audioCodecCtx->sample_rate };
			// Update the presentation timestamp
			m_stAudio.frame->pts = av_rescale_q(m_stAudio.samples_count, oRational, audioCodecCtx->time_base);
			m_stAudio.samples_count += audioCodecCtx->frame_size;
			m_stAudio.next_pts = m_stAudio.frame->pts;
			// Encode and write one audio frame
			bRet = WriteFrame(m_pOutFmtCtx, audioCodecCtx, m_stAudio.st, m_stAudio.frame, m_stAudio.tmp_pkt);
		}
		else
		{
			// Audio is ahead of video; stop draining the FIFO for now
			break;
		}

		nSamples = av_audio_fifo_size(m_pAudioFifo);
	}

	return nSamples;
}
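
m_pAudioFifo is assumed to be created once before any audio arrives, for example right after OpenAudio(); a minimal sketch of that setup:

// Assumed to run once after OpenAudio(): create the sample FIFO that buffers
// resampled audio until a full encoder frame is available
m_pAudioFifo = av_audio_fifo_alloc(m_stAudio.enc->sample_fmt,
	m_stAudio.enc->channels,
	m_stAudio.enc->frame_size);
if (!m_pAudioFifo)
	return false;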

Processing a silence frame

bool CMyEncoder::ProcAudioSilenceFrame()
{
	AVFrame* pSilenceFrame = nullptr;
	AVCodecContext* audioCodecCtx = nullptr;
	audioCodecCtx = m_stAudio.enc;
	bool bRet = false;
	// Clock sync: only insert silence when audio is not ahead of video
	if (av_compare_ts(m_stAudio.next_pts, audioCodecCtx->time_base, m_stVideo.next_pts, m_stVideo.enc->time_base) <= 0)
	{
		pSilenceFrame = AllocSilenceFrame(audioCodecCtx->channels, audioCodecCtx->sample_rate, AV_SAMPLE_FMT_FLT);
		if (pSilenceFrame)
		{
			// Update the PTS
			AVRational oRational = { 1, audioCodecCtx->sample_rate };
			pSilenceFrame->pts = av_rescale_q(m_stAudio.samples_count, oRational, audioCodecCtx->time_base);
			m_stAudio.samples_count += audioCodecCtx->frame_size;
			m_stAudio.next_pts = pSilenceFrame->pts;

			bRet = WriteFrame(m_pOutFmtCtx, audioCodecCtx, m_stAudio.st, pSilenceFrame, m_stAudio.tmp_pkt);
			av_frame_free(&pSilenceFrame);
		}
	}

	return bRet;
}
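
AllocSilenceFrame() is likewise not listed in the post. A hedged sketch of such a helper, allocating one encoder-sized frame filled with silence (the names and layout handling are illustrative):

AVFrame* CMyEncoder::AllocSilenceFrame(int nChannels, int nSampleRate, enum AVSampleFormat fmt)
{
	AVFrame* frame = av_frame_alloc();
	if (!frame)
		return nullptr;

	frame->format = fmt;
	frame->channel_layout = av_get_default_channel_layout(nChannels);
	frame->sample_rate = nSampleRate;
	frame->nb_samples = m_stAudio.enc->frame_size;

	if (av_frame_get_buffer(frame, 0) < 0)
	{
		av_frame_free(&frame);
		return nullptr;
	}

	// Fill every plane with the silence value for this sample format
	av_samples_set_silence(frame->data, 0, frame->nb_samples, nChannels, fmt);
	return frame;
}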

Reference: https://ffmpeg.org/doxygen/trunk/muxing_8c-example.html
