The workflow for muxing YUV and PCM data with FFmpeg is shown in the figure below:
To mux the raw audio and video data, we first implement the audio and video encoders, which encode the YUV and PCM data respectively. The video encoder typically outputs H264 and the audio encoder outputs AAC. The corresponding implementations are as follows:
//audio_encoder.h
#ifndef _AUDIO_ENCODER_H_
#define _AUDIO_ENCODER_H_
//Audio encoder
#include <vector>
extern "C" {
#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
}
class AudioEncoder {
public:
AudioEncoder();
~AudioEncoder();
public:
//Initialize the encoder
//@1 channel count @2 sample rate @3 bit rate
int InitAAC(int channels, int sample_rate, int bit_rate);
//Clean up the encoder
void DeInit();
//Encode a frame of in-memory data
AVPacket *Encode(AVFrame *frame, int stream_index, int64_t pts, int64_t time_base);
int Encode(AVFrame *frame, int stream_index, int64_t pts, int64_t time_base,
std::vector<AVPacket *> &packets);
//Get the encoder frame size (samples per channel)
int GetFrameSize();
//Get the encoder context
AVCodecContext *GetCodecContext();
int GetChannels();
int GetSampleRate();
private:
int channels_ = 2;
int sample_rate_ = 44100;
int bit_rate_ = 128 * 1024;
int64_t pts_ = 0;
AVCodecContext* codec_ctx_ = NULL;
};
#endif
//audio_encoder.cpp
#include "audio_encoder.h"
#include <cstdio>
using namespace std;
AudioEncoder::AudioEncoder() {
}
AudioEncoder::~AudioEncoder() {
if (codec_ctx_) {
DeInit();
}
}
int AudioEncoder::InitAAC(int channels, int sample_rate, int bit_rate) {
channels_ = channels;
sample_rate_ = sample_rate;
bit_rate_ = bit_rate;
AVCodec* codec = avcodec_find_encoder(AV_CODEC_ID_AAC);
if (!codec)
{
return -1;
}
codec_ctx_ = avcodec_alloc_context3(codec);
if (!codec_ctx_)
{
return -1;
}
codec_ctx_->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
codec_ctx_->bit_rate = bit_rate_;
codec_ctx_->sample_rate = sample_rate_;
codec_ctx_->sample_fmt = AV_SAMPLE_FMT_FLTP;
codec_ctx_->channels = channels_;
codec_ctx_->channel_layout = av_get_default_channel_layout(codec_ctx_->channels);
int ret = avcodec_open2(codec_ctx_, NULL, NULL);
if (ret < 0) {
return -1;
}
return 0;
}
void AudioEncoder::DeInit() {
if (codec_ctx_) {
avcodec_free_context(&codec_ctx_);
}
}
AVPacket* AudioEncoder::Encode(AVFrame *frame, int stream_index, int64_t pts, int64_t time_base) {
if (!codec_ctx_) {
printf("codec_ctx_ null\n");
return NULL;
}
pts = av_rescale_q(pts, AVRational{ 1, (int)time_base }, codec_ctx_->time_base);
if (frame) {
frame->pts = pts;
}
int ret = avcodec_send_frame(codec_ctx_, frame);
if (ret != 0) {
char errbuf[1024] = { 0 };
av_strerror(ret, errbuf, sizeof(errbuf) - 1);
printf("avcodec_send_frame failed:%s\n", errbuf);
return NULL;
}
AVPacket* packet = av_packet_alloc();
ret = avcodec_receive_packet(codec_ctx_, packet);
if (ret != 0) {
char errbuf[1024] = { 0 };
av_strerror(ret, errbuf, sizeof(errbuf) - 1);
printf("aac avcodec_receive_packet failed:%s\n", errbuf);
av_packet_free(&packet);
return NULL;
}
packet->stream_index = stream_index;
return packet;
}
int AudioEncoder::Encode(AVFrame *frame, int stream_index, int64_t pts, int64_t time_base,
std::vector<AVPacket *> &packets) {
if(!codec_ctx_) {
return -1;
}
pts = av_rescale_q(pts, AVRational{1, (int)time_base}, codec_ctx_->time_base);
if(frame) {
frame->pts = pts;
}
int ret = avcodec_send_frame(codec_ctx_, frame);
if(ret != 0) {
return -1;
}
while(1)
{
AVPacket *packet = av_packet_alloc();
ret = avcodec_receive_packet(codec_ctx_, packet);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
ret = 0;
av_packet_free(&packet);
break;
} else if (ret < 0) {
av_packet_free(&packet);
ret = -1;
break; //stop on a real error instead of pushing a freed packet
}
packet->stream_index = stream_index;
packets.push_back(packet);
}
return ret;
}
int AudioEncoder::GetFrameSize() {
if (codec_ctx_) {
return codec_ctx_->frame_size;
}
return 0;
}
AVCodecContext *AudioEncoder::GetCodecContext() {
return codec_ctx_;
}
int AudioEncoder::GetChannels() {
if (codec_ctx_) {
return codec_ctx_->channels;
}
return -1;
}
int AudioEncoder::GetSampleRate() {
if (codec_ctx_) {
return codec_ctx_->sample_rate;
}
return -1;
}
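One detail worth calling out before moving on: the AAC encoder consumes a fixed number of samples per call, exposed through codec_ctx_->frame_size (commonly 1024 for AAC). The helper below is an illustrative addition, not part of the original code; it computes how many bytes of packed S16 input make up one such frame, the same calculation main.cpp performs inline later.
extern "C" {
#include "libavutil/samplefmt.h"
}
// Bytes of interleaved S16 input consumed per encoded AAC frame:
// frame_size samples per channel * channels * bytes per S16 sample.
// With the typical frame_size of 1024 and stereo input: 1024 * 2 * 2 = 4096.
int PcmBytesPerAacFrame(int frame_size, int channels) {
    return frame_size * channels *
           av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
}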
//video_encoder.h
#ifndef _VIDEO_ENCODER_H_
#define _VIDEO_ENCODER_H_
//Video encoder
extern "C" {
#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
}
#include <vector>
class VideoEncoder {
public:
VideoEncoder();
~VideoEncoder();
public:
//Initialize/clean up the encoder
int InitH264(int width, int height, int fps, int bit_rate);
void DeInit();
//Encode video frames
AVPacket* Encode(uint8_t* yuv_data, int yuv_size, int stream_idx,
int64_t pts, int64_t time_base);
int Encode(uint8_t* yuv_data, int yuv_size, int stream_idx,
int64_t pts, int64_t time_base, std::vector<AVPacket*>& pkts);
//Get the encoder context
AVCodecContext* GetCodecContext();
private:
int width_ = 0;
int height_ = 0;
int fps_ = 25;
int bit_rate_ = 500*1024;
int64_t pts_ = 0;
AVCodecContext* codec_ctx_ = NULL;
AVFrame* frame_ = NULL;
AVDictionary* dict_ = NULL;
};
#endif
//video_encoder.cpp
#include "video_encoder.h"
extern "C" {
#include "libavutil/imgutils.h"
}
VideoEncoder::VideoEncoder() {
}
VideoEncoder::~VideoEncoder() {
if (codec_ctx_) {
DeInit();
}
}
int VideoEncoder::InitH264(int width, int height, int fps, int bit_rate) {
width_ = width;
height_ = height;
fps_ = fps;
bit_rate_ = bit_rate;
AVCodec* codec = avcodec_find_encoder(AV_CODEC_ID_H264);
if (!codec) {
return -1;
}
codec_ctx_ = avcodec_alloc_context3(codec);
if (!codec_ctx_) {
return -1;
}
codec_ctx_->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
codec_ctx_->bit_rate = bit_rate_;
codec_ctx_->width = width_;
codec_ctx_->height = height_;
codec_ctx_->framerate.num = fps_;
codec_ctx_->framerate.den = 1;
codec_ctx_->time_base.num = 1;
codec_ctx_->time_base.den = 1000000; // microseconds
codec_ctx_->gop_size = fps_;
codec_ctx_->max_b_frames = 0;
codec_ctx_->pix_fmt = AV_PIX_FMT_YUV420P;
int ret = avcodec_open2(codec_ctx_, NULL, &dict_);
if (ret < 0) {
return -1;
}
frame_ = av_frame_alloc();
if (!frame_) {
return -1;
}
frame_->width = width_;
frame_->height = height_;
frame_->format = codec_ctx_->pix_fmt;
return 0;
}
void VideoEncoder::DeInit() {
if (codec_ctx_) {
avcodec_free_context(&codec_ctx_);
}
if (frame_) {
av_frame_free(&frame_);
}
if (dict_) {
av_dict_free(&dict_);
}
}
AVPacket* VideoEncoder::Encode(uint8_t* yuv_data, int yuv_size, int stream_idx,
int64_t pts, int64_t time_base) {
if (!codec_ctx_) {
return NULL;
}
int ret = 0;
AVRational tb;
tb.num = 1;
tb.den = (int)time_base; //narrow explicitly: time_base is int64_t
pts = av_rescale_q(pts, tb, codec_ctx_->time_base);
frame_->pts = pts;
if (yuv_data) {
int ret_size = av_image_fill_arrays(frame_->data, frame_->linesize,
yuv_data, (AVPixelFormat)frame_->format,
frame_->width, frame_->height, 1);
if (ret_size != yuv_size) {
return NULL;
}
ret = avcodec_send_frame(codec_ctx_, frame_);
} else {
ret = avcodec_send_frame(codec_ctx_, NULL);
}
if (ret != 0) {
return NULL;
}
AVPacket* pkt = av_packet_alloc();
ret = avcodec_receive_packet(codec_ctx_, pkt);
if (ret < 0) {
av_packet_free(&pkt);
return NULL;
}
pkt->stream_index = stream_idx;
return pkt;
}
int VideoEncoder::Encode(uint8_t* yuv_data, int yuv_size, int stream_idx,
int64_t pts, int64_t time_base, std::vector<AVPacket*>& pkts) {
if (!codec_ctx_) {
return -1;
}
int ret = 0;
AVRational tb;
tb.num = 1;
tb.den = (int)time_base; //narrow explicitly: time_base is int64_t
pts = av_rescale_q(pts, tb, codec_ctx_->time_base);
frame_->pts = pts;
if (yuv_data) {
int ret_size = av_image_fill_arrays(frame_->data, frame_->linesize,
yuv_data, (AVPixelFormat)frame_->format,
frame_->width, frame_->height, 1);
if (ret_size != yuv_size) {
return -1;
}
ret = avcodec_send_frame(codec_ctx_, frame_);
} else {
ret = avcodec_send_frame(codec_ctx_, NULL);
}
if (ret != 0) {
return -1;
}
while (true) {
AVPacket* pkt = av_packet_alloc();
ret = avcodec_receive_packet(codec_ctx_, pkt);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
ret = 0;
av_packet_free(&pkt);
break;
} else if (ret < 0) {
av_packet_free(&pkt);
ret = -1;
break; //stop on a real error instead of pushing a freed packet
}
pkt->stream_index = stream_idx;
pkts.push_back(pkt);
}
return ret;
}
AVCodecContext* VideoEncoder::GetCodecContext() {
return codec_ctx_;
}
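Note that dict_ is passed to avcodec_open2() in InitH264() but never populated, so the encoder runs with default settings. As a sketch (the option values below are assumptions for illustration, not part of the original code), low-latency x264 settings could be applied by filling the dictionary before the avcodec_open2() call:
extern "C" {
#include "libavutil/dict.h"
}
// Hypothetical options, set inside InitH264() before avcodec_open2(codec_ctx_, NULL, &dict_):
av_dict_set(&dict_, "preset", "ultrafast", 0); // trade compression for encoding speed
av_dict_set(&dict_, "tune", "zerolatency", 0); // avoid encoder-side frame buffering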
Raw PCM audio is usually stored as AV_SAMPLE_FMT_S16, a packed (interleaved) format, while the AAC encoder expects AV_SAMPLE_FMT_FLTP, planar float, as its input. The PCM data therefore cannot be fed to the encoder directly; it has to be resampled first (a small format check follows this paragraph). The audio resampler class is implemented as follows:
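The packed/planar distinction can be verified with av_sample_fmt_is_planar(). The small standalone program below (an illustrative addition) prints 0 for S16, whose samples are interleaved as L0 R0 L1 R1 ... in data[0], and 1 for FLTP, which keeps one plane per channel (data[0] = left, data[1] = right).
#include <cstdio>
extern "C" {
#include "libavutil/samplefmt.h"
}
int main() {
    // Returns 1 for planar formats, 0 for packed (interleaved) ones.
    printf("S16  planar? %d\n", av_sample_fmt_is_planar(AV_SAMPLE_FMT_S16));  // 0
    printf("FLTP planar? %d\n", av_sample_fmt_is_planar(AV_SAMPLE_FMT_FLTP)); // 1
    return 0;
}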
//audio_resampler.h
#ifndef _AUDIO_RESAMPLE_H_
#define _AUDIO_RESAMPLE_H_
//Audio resampler
extern "C" {
#include "libavcodec/avcodec.h"
#include "libswresample/swresample.h"
#include "libavformat/avformat.h"
}
//Allocate/free PCM frame buffers
AVFrame* AllocFltpPcmFrame(int channels, int nb_samples);
void FreePcmFrame(AVFrame* frame);
class AudioResampler {
public:
AudioResampler();
~AudioResampler();
public:
//Convert the sample format from S16 to FLTP
int InitFromS16ToFLTP(int in_channels, int in_sample_rate, int out_channels, int out_sample_rate);
int ResampleFromS16ToFLTP(uint8_t *in_data, AVFrame *out_frame);
void DeInit();
private:
int in_channels_;
int in_sample_rate_;
int out_channels_;
int out_sample_rate_;
SwrContext* swr_ctx_ = NULL;
};
#endif
//audio_resampler.cpp
#include "audio_resampler.h"
AudioResampler::AudioResampler()
{
}
AudioResampler::~AudioResampler()
{
if (swr_ctx_) {
DeInit();
}
}
int AudioResampler::InitFromS16ToFLTP(int in_channels, int in_sample_rate, int out_channels, int out_sample_rate)
{
in_channels_ = in_channels;
in_sample_rate_ = in_sample_rate;
out_channels_ = out_channels;
out_sample_rate_ = out_sample_rate;
swr_ctx_ = swr_alloc_set_opts(swr_ctx_,
av_get_default_channel_layout(out_channels_),
AV_SAMPLE_FMT_FLTP,
out_sample_rate_,
av_get_default_channel_layout(in_channels_),
AV_SAMPLE_FMT_S16,
in_sample_rate_,
0, NULL);
if (!swr_ctx_) {
return -1;
}
int ret = swr_init(swr_ctx_);
if (ret < 0) {
return -1;
}
return 0;
}
int AudioResampler::ResampleFromS16ToFLTP(uint8_t *in_data, AVFrame *out_frame) {
const uint8_t* indata[AV_NUM_DATA_POINTERS] = {0};
indata[0] = in_data;
int samples = swr_convert(swr_ctx_, out_frame->data, out_frame->nb_samples,
indata, out_frame->nb_samples);
if (samples <= 0) {
return -1;
}
return samples;
}
void AudioResampler::DeInit() {
if (swr_ctx_) {
swr_free(&swr_ctx_);
}
}
AVFrame* AllocFltpPcmFrame(int channels, int nb_samples) {
AVFrame* pcm = av_frame_alloc();
if (!pcm) {
return NULL;
}
pcm->format = AV_SAMPLE_FMT_FLTP;
pcm->channels = channels;
pcm->channel_layout = av_get_default_channel_layout(channels);
pcm->nb_samples = nb_samples;
int ret = av_frame_get_buffer(pcm, 0);
if (ret != 0)
{
av_frame_free(&pcm); //avoid leaking the frame on failure
return NULL;
}
return pcm;
}
void FreePcmFrame(AVFrame* frame) {
if (frame) {
av_frame_free(&frame);
}
}
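A minimal usage sketch of the resampler above (the frame size of 1024 is an assumption matching the usual AAC frame_size, and the input here is just silence):
void ResampleExample() {
    AudioResampler rs;
    if (rs.InitFromS16ToFLTP(2, 44100, 2, 44100) < 0) return;
    AVFrame* fltp = AllocFltpPcmFrame(2, 1024);
    if (!fltp) return;
    uint8_t s16_buf[1024 * 2 * 2] = {0};  // 1024 samples * 2 ch * 2 bytes, silence
    int samples = rs.ResampleFromS16ToFLTP(s16_buf, fltp);
    // samples is the converted sample count per channel, or -1 on failure
    FreePcmFrame(fltp);
    rs.DeInit();
}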
Putting the encoded audio and video data into a container format yields the video files we encounter every day. The muxer class is implemented as shown below; a sketch of its expected call order follows this paragraph.
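A usage sketch of the Muxer class defined below (file name assumed, error handling omitted). The order matters: all streams must be added and the output opened before the header can be written.
void MuxerCallOrder(AVCodecContext* vctx, AVCodecContext* actx) {
    Muxer muxer;
    muxer.Init("out.mp4");  // allocate the output format context
    muxer.AddStream(vctx);  // one stream per encoder context
    muxer.AddStream(actx);
    muxer.Open();           // avio_open() the output file
    muxer.SendHeader();     // avformat_write_header()
    // ...call muxer.SendPacket(pkt) for every encoded packet...
    muxer.SendTailer();     // av_write_trailer()
    muxer.DeInit();
}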
//mux.h
#ifndef _MUXER_MUX_H_
#define _MUXER_MUX_H_
#include <string>
extern "C" {
#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
}
class Muxer {
public:
Muxer();
~Muxer();
public:
//Initialize/clean up the muxer
int Init(const char* url);
void DeInit();
//Add a stream for the given encoder
int AddStream(AVCodecContext* codec_ctx);
//Write the file header
int SendHeader();
//Write a packet
int SendPacket(AVPacket* pkt);
//Write the file trailer
int SendTailer();
//Open the output file
int Open();
int GetAudioStreamIndex();
int GetVideoStreamIndex();
private:
AVFormatContext* fmt_ctx_ = NULL;
std::string url_;
//Audio/video encoder contexts
AVCodecContext* audio_codec_ctx_ = NULL;
AVCodecContext* video_codec_ctx_ = NULL;
//Audio/video streams
AVStream* audio_stream_ = NULL;
AVStream* video_stream_ = NULL;
//Stream indices
int audio_idx_ = -1;
int video_idx_ = -1;
};
#endif
//mux.cpp
#include "mux.h"
Muxer::Muxer()
{
}
Muxer::~Muxer()
{
}
int Muxer::Init(const char* url)
{
int ret = avformat_alloc_output_context2(&fmt_ctx_, NULL,
NULL, url);
if (ret < 0) {
return -1;
}
url_ = url;
return 0;
}
void Muxer::DeInit() {
if (fmt_ctx_) {
if (fmt_ctx_->pb) {
avio_closep(&fmt_ctx_->pb); //close the output opened in Open()
}
//fmt_ctx_ is an output context: free it, avformat_close_input() is for inputs
avformat_free_context(fmt_ctx_);
fmt_ctx_ = NULL;
}
url_ = "";
audio_codec_ctx_ = NULL;
audio_stream_ = NULL;
audio_idx_ = -1;
video_codec_ctx_ = NULL;
video_stream_ = NULL;
video_idx_ = -1;
}
int Muxer::AddStream(AVCodecContext* codec_ctx)
{
if (!fmt_ctx_) {
return -1;
}
if (!codec_ctx) {
return -1;
}
AVStream* st = avformat_new_stream(fmt_ctx_, NULL);
if (!st) {
return -1;
}
avcodec_parameters_from_context(st->codecpar, codec_ctx);
av_dump_format(fmt_ctx_, 0, url_.c_str(), 1);
if (codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
audio_codec_ctx_ = codec_ctx;
audio_stream_ = st;
audio_idx_ = st->index;
} else if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
video_codec_ctx_ = codec_ctx;
video_stream_ = st;
video_idx_ = st->index;
}
return 0;
}
int Muxer::SendHeader()
{
if (!fmt_ctx_) {
return -1;
}
int ret = avformat_write_header(fmt_ctx_, NULL);
if (ret < 0) {
return -1;
}
return 0;
}
int Muxer::SendPacket(AVPacket* pkt)
{
if (!pkt || pkt->size <= 0 || !pkt->data) {
if (pkt) {
av_packet_free(&pkt);
}
return -1;
}
int stream_idx = pkt->stream_index; //read only after the null check
AVRational src_time_base;
AVRational dst_time_base;
if (video_stream_ && video_codec_ctx_ && stream_idx == video_idx_) {
src_time_base = video_codec_ctx_->time_base;
dst_time_base = video_stream_->time_base;
} else if (audio_stream_ && audio_codec_ctx_ && stream_idx == audio_idx_) {
src_time_base = audio_codec_ctx_->time_base;
dst_time_base = audio_stream_->time_base;
} else {
av_packet_free(&pkt); //unknown stream: the time bases would be uninitialized
return -1;
}
pkt->pts = av_rescale_q(pkt->pts, src_time_base, dst_time_base);
pkt->dts = av_rescale_q(pkt->dts, src_time_base, dst_time_base);
pkt->duration = av_rescale_q(pkt->duration, src_time_base, dst_time_base);
int ret = 0;
ret = av_interleaved_write_frame(fmt_ctx_, pkt);
av_packet_free(&pkt);
if (ret == 0) {
return 0;
} else {
return -1;
}
}
int Muxer::SendTailer()
{
if (!fmt_ctx_) {
return -1;
}
int ret = av_write_trailer(fmt_ctx_);
if (ret != 0) {
return -1;
}
return 0;
}
int Muxer::Open()
{
int ret = avio_open(&fmt_ctx_->pb, url_.c_str(), AVIO_FLAG_WRITE);
if (ret < 0) {
return -1;
}
return 0;
}
int Muxer::GetAudioStreamIndex() {
return audio_idx_;
}
int Muxer::GetVideoStreamIndex() {
return video_idx_;
}
With the audio/video encoders, the audio resampler, and the muxer in place, we can mux YUV and PCM data into a common video format. One important point deserves attention: before muxing, the audio and video time bases (time_base) must be converted consistently, otherwise the muxed file will suffer from audio/video desynchronization. A sketch of that conversion follows this paragraph, and a complete reference example appears after it.
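As a minimal sketch of that conversion: both encoders receive pts in microseconds (a 1/1000000 time base), Encode() rescales them into the encoder time base with av_rescale_q(), and Muxer::SendPacket() rescales them once more into the stream time base chosen by the container. The helper name below is illustrative.
extern "C" {
#include "libavutil/mathematics.h"
}
int64_t MicrosecondsToEncoderPts(int64_t pts_us, AVRational enc_tb) {
    AVRational us_tb = {1, 1000000};            // microsecond time base
    // e.g. 40000 us at enc_tb = 1/44100 -> pts 1764
    return av_rescale_q(pts_us, us_tb, enc_tb);
}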
//main.cpp
#define _CRT_SECURE_NO_WARNINGS
extern "C" {
#include "libavcodec/avcodec.h"
#include "libavutil/avutil.h"
#include "libavformat/avformat.h"
#include "libswresample/swresample.h"
}
#include <cstdio>
#include "video_encoder.h"
#include "audio_encoder.h"
#include "audio_resampler.h"
#include "mux.h"
using namespace std;
#define YUV_WIDTH 1920 //video width
#define YUV_HEIGHT 1080 //video height
#define YUV_FPS 25 //frame rate
#define VIDEO_BIT_RATE 2000*1024 //video bit rate
#define PCM_SAMPLE_FORMAT AV_SAMPLE_FMT_S16 //PCM sample format
#define PCM_SAMPLE_RATE 44100 //sample rate
#define PCM_CHANNELS 2 //channel count
#define AUDIO_BIT_RATE 128*1024 //audio bit rate
//audio/video time bases (microseconds)
#define AUDIO_TIME_BASE 1000000
#define VIDEO_TIME_BASE 1000000
//Extract YUV and PCM data with FFmpeg:
//ffmpeg -i test.mp4 -pix_fmt yuv420p test.yuv
//ffmpeg -i test.mp4 -vn -ar 44100 -ac 2 -f s16le test.pcm
int main(int argc, char* argv[]) {
//YUV and PCM input file paths
const char* in_yuv_name = "D:\\Downloads\\ffmpeg\\yuv420p.yuv";
const char* in_pcm_name = "D:\\Downloads\\ffmpeg\\video0.pcm";
//output file path (an FLV container, despite the variable name)
const char* out_mp4_name = "D:\\mux_out.flv";
//open the input files
FILE* in_yuv_fp= NULL;
FILE* in_pcm_fp = NULL;
in_yuv_fp = fopen(in_yuv_name, "rb");
in_pcm_fp = fopen(in_pcm_name, "rb");
if (!in_yuv_fp) {
return -1;
}
if (!in_pcm_fp) {
return -1;
}
int ret = 0;
int yuv_width = YUV_WIDTH;
int yuv_height = YUV_HEIGHT;
int yuv_fps = YUV_FPS;
int video_bit_rate = VIDEO_BIT_RATE;
//initialize the video encoder
VideoEncoder video_encoder;
ret = video_encoder.InitH264(yuv_width, yuv_height, yuv_fps, video_bit_rate);
if (ret < 0) {
return -1;
}
//buffer for reading one frame of YUV data
int y_frame_size = yuv_width * yuv_height;
int u_frame_size = yuv_width * yuv_height / 4;
int v_frame_size = yuv_width * yuv_height / 4;
int yuv_frame_size = y_frame_size + u_frame_size + v_frame_size;
uint8_t* yuv_frame_buf = (uint8_t*)malloc(yuv_frame_size);
if (!yuv_frame_buf) {
return -1;
}
//initialize the audio encoder
int pcm_channels = PCM_CHANNELS;
int pcm_sample_rate = PCM_SAMPLE_RATE;
int pcm_sample_format = PCM_SAMPLE_FORMAT;
int audio_bit_rate = AUDIO_BIT_RATE;
AudioEncoder audio_encoder;
ret = audio_encoder.InitAAC(pcm_channels, pcm_sample_rate, audio_bit_rate);
if (ret < 0) {
return -1;
}
//buffer for reading one frame of PCM data
int pcm_frame_size = av_get_bytes_per_sample((AVSampleFormat)pcm_sample_format)
* pcm_channels * audio_encoder.GetFrameSize();
if (pcm_frame_size <= 0) {
return -1;
}
uint8_t* pcm_frame_buf = (uint8_t*)malloc(pcm_frame_size);
if (!pcm_frame_buf) {
return -1;
}
//initialize the resampler
AudioResampler audio_resample;
ret = audio_resample.InitFromS16ToFLTP(pcm_channels, pcm_sample_rate,
audio_encoder.GetChannels(), audio_encoder.GetSampleRate());
if (ret < 0) {
return -1;
}
//initialize the muxer
Muxer video_muxer;
ret = video_muxer.Init(out_mp4_name);
if (ret < 0) {
return -1;
}
//add the video and audio streams
ret = video_muxer.AddStream(video_encoder.GetCodecContext());
if (ret < 0) {
return -1;
}
ret = video_muxer.AddStream(audio_encoder.GetCodecContext());
if (ret < 0) {
return -1;
}
//open the muxer output
ret = video_muxer.Open();
if (ret < 0) {
return -1;
}
ret = video_muxer.SendHeader();
if (ret < 0) {
return -1;
}
//time bases and pts
int64_t audio_time_base = AUDIO_TIME_BASE;
int64_t video_time_base = VIDEO_TIME_BASE;
double audio_pts = 0;
double video_pts = 0;
//duration of one audio/video frame in time_base units
double audio_frame_duration = 1.0 * audio_encoder.GetFrameSize() /
pcm_sample_rate * audio_time_base;
double video_frame_duration = 1.0 / yuv_fps * video_time_base;
int audio_finish = 0;
int video_finish = 0;
int read_len = 0;
AVPacket* pkt = NULL;
std::vector<AVPacket*> pkts;
int audio_idx = video_muxer.GetAudioStreamIndex();
int video_idx = video_muxer.GetVideoStreamIndex();
while (true) {
//stop once both inputs have been fully consumed
if (audio_finish && video_finish)
{
break;
}
//read and encode a video frame
if ((video_finish != 1 && audio_pts > video_pts) ||
(video_finish != 1 && audio_finish == 1))
{
read_len = fread(yuv_frame_buf, 1, yuv_frame_size, in_yuv_fp);
if (read_len < yuv_frame_size)
{
video_finish = 1;
}
if (video_finish != 1)
{
ret = video_encoder.Encode(yuv_frame_buf, yuv_frame_size,
video_idx, video_pts, video_time_base,
pkts);
} else {
ret = video_encoder.Encode(NULL, 0, video_idx, video_pts,
video_time_base, pkts);
}
//advance the pts
video_pts += video_frame_duration;
//write the packets to the file
if (ret >= 0)
{
for (int i = 0; i < pkts.size(); ++i)
{
ret = video_muxer.SendPacket(pkts[i]);
}
}
pkts.clear();
}
//read and encode an audio frame
else if (audio_finish != 1)
{
read_len = fread(pcm_frame_buf, 1, pcm_frame_size, in_pcm_fp);
if (read_len < pcm_frame_size)
{
audio_finish = 1;
}
if (audio_finish != 1)
{
AVFrame* fltp_frame = AllocFltpPcmFrame(pcm_channels,
audio_encoder.GetFrameSize());
ret = audio_resample.ResampleFromS16ToFLTP(pcm_frame_buf, fltp_frame);
if (ret < 0) {
printf("ResampleFromS16ToFLTP failed\n");
}
ret = audio_encoder.Encode(fltp_frame, audio_idx, audio_pts, audio_time_base, pkts);
FreePcmFrame(fltp_frame);
} else
{
ret = audio_encoder.Encode(NULL, audio_idx, audio_pts, audio_time_base, pkts);
}
audio_pts += audio_frame_duration;
if (ret >= 0)
{
for (int i = 0; i < pkts.size(); i++)
{
ret = video_muxer.SendPacket(pkts[i]);
}
}
pkts.clear();
}
}
ret = video_muxer.SendTailer();
if (ret < 0) {
printf("SendTailer failed\n");
}
printf("write video finished\n");
if (yuv_frame_buf) {
free(yuv_frame_buf);
}
if (pcm_frame_buf) {
free(pcm_frame_buf);
}
if (in_yuv_fp) {
fclose(in_yuv_fp);
}
if (in_pcm_fp) {
fclose(in_pcm_fp);
}
return 0;
}