/*
 * 1. Open the camera with videoInput and start video capture.
 * 2. Open the chosen encoder — libx264 here; h264_qsv / h264_nvenc could be used for hardware encoding.
 * 3. Capture delivers RGB data but encoding needs YUV, so create a pixel-format converter.
 * 4. The encoded data must be saved to a file, so create an output muxer (format) context.
 * 5. Open the output, then repeatedly grab frames, convert pixel format, encode, and write to file.
 */
#include <chrono>
#include <iostream>
#include <thread>
#include "videoInput.h"
extern "C" {
#include "libavformat\avformat.h"
#include "libavcodec\avcodec.h"
#include "libavutil\avutil.h"
#include "libswscale\swscale.h"
#include "libswresample\swresample.h"
#include "libavutil\imgutils.h"
#include "libavutil\time.h"
}
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "swscale.lib")
#pragma comment(lib, "swresample.lib")
using std::cout;
using std::endl;
static const int s_width = 1280;
static const int s_height = 720;
static const int s_fps = 12;
static const int s_gopSize = 5 * s_fps;
static const int s_bitrate = 1000000;
// Print a human-readable description of an FFmpeg error code.
// info: label identifying the call that failed (a string literal at every
//       call site, hence const char * — binding a literal to char* is
//       ill-formed in modern C++).
// ret:  FFmpeg error code (a negative AVERROR value).
void printErr(const char *info, int ret)
{
    char err[1024] = { 0 };
    av_strerror(ret, err, sizeof(err));
    cout << info << ": " << err << endl;
    system("pause"); // Windows-only: keep the console open so the error stays visible
}
// Convert an AVRational to its floating-point value.
// Returns 0 when the denominator is zero (avoids division by zero).
double r2d(AVRational r)
{
    if (r.den == 0) {
        return 0;
    }
    return static_cast<double>(r.num) / r.den;
}
int main()
{
//初始化视频采集
int deviceId = 0;
videoInput vi;
vi.setIdealFramerate(deviceId, s_fps);
if (!vi.setupDevice(deviceId, s_width, s_height)) {
cout << "setupDevice failed" << endl;
return -1;
}
//打开h264编码器
AVCodec *codec = avcodec_find_encoder_by_name("libx264");
if (codec == nullptr) {
cout << "avcodec_find_encoder failed" << endl;
return -1;
}
AVCodecContext *codecCtx = avcodec_alloc_context3(codec);
if (codec == nullptr) {
cout << "avcodec_alloc_context3 failed" << endl;
return -1;
}
codecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; //加上此标识可以从extradata获取sps pps
codecCtx->codec_id = codec->id; //编码器类型h264
codecCtx->width = s_width; //输入图像的宽度
codecCtx->height = s_height; //输入图像的高度
codecCtx->pix_fmt = AV_PIX_FMT_YUV420P; //输入图像的像素格式
codecCtx->bit_rate = s_bitrate; //码率
codecCtx->framerate = { s_fps, 1 }; //帧率
codecCtx->time_base = { 1, s_fps }; //timebase,pts的单位
codecCtx->gop_size = s_gopSize; //gop的大小
codecCtx->max_b_frames = 0; //b帧的数量
AVDictionary *options = nullptr;
av_dict_set(&options, "preset", "veryfast", 0);
av_dict_set(&options, "tune", "zerolatency", 0);
av_dict_set(&options, "profile", "main", 0);
int ret = avcodec_open2(codecCtx, nullptr, &options);
if (ret != 0) {
printErr("avcodec_open2", ret);
return -1;
}
//初始化像素格式转换上下文
SwsContext *swsCtx = NULL;
swsCtx = sws_getCachedContext(nullptr, s_width, s_height, AV_PIX_FMT_RGB24, //src w,h,fmt
s_width, s_height, AV_PIX_FMT_YUV420P, //dst w,h,fmt
SWS_BICUBIC, //尺寸变化算法
NULL, NULL, NULL);
if (swsCtx == NULL) {
printErr("sws_getCachedContext", ret);
return -1;
}
//创建采集数据rgb缓存以及格式转换后yuv缓存
uint8_t *rgb[AV_NUM_DATA_POINTERS] = { 0 };
int lineSize[AV_NUM_DATA_POINTERS] = { 0 };
ret = av_image_alloc(rgb, lineSize, s_width, s_height, AV_PIX_FMT_RGB24, 1);
if (ret <= 0) {
printErr("av_image_alloc", ret);
return -1;
}
AVFrame *frame = av_frame_alloc();
frame->width = s_width;
frame->height = s_height;
frame->pts = 0;
frame->pkt_dts = 0;
frame->format = codecCtx->pix_fmt;
ret = av_frame_get_buffer(frame, 0);
if (ret != 0) {
printErr("av_frame_get_buffer", ret);
return -1;
}
//创建输出封装格式上下文
char *outPath = "e:/haha.avi";
AVFormatContext *outCtx = nullptr;
ret = avformat_alloc_output_context2(&outCtx, nullptr, nullptr, outPath);
if (ret != 0) {
printErr("avformat_alloc_output_context2", ret);
return -1;
}
AVStream *st = avformat_new_stream(outCtx, nullptr);
if (st == nullptr) {
printErr("avformat_new_stream", ret);
return -1;
}
ret = avcodec_parameters_from_context(st->codecpar, codecCtx);
if (ret != 0) {
printErr("avcodec_parameters_from_context", ret);
return -1;
}
/打开输出文件//
ret = avio_open(&outCtx->pb, outPath, AVIO_FLAG_WRITE);
if (ret != 0) {
printErr("avio_open", ret);
return -1;
}
ret = avformat_write_header(outCtx, nullptr);
if (ret != 0) {
printErr("avformat_write_header", ret);
return -1;
}
/读取采集数据进行编码写入文件//
AVPacket *pkt = av_packet_alloc();
int64_t curPts = 0;
while (true) {
if (vi.isFrameNew(deviceId)) {
if (!vi.getPixels(deviceId, rgb[0], true, true)) {
continue;
}
ret = sws_scale(swsCtx, rgb, lineSize, 0, s_height, frame->data, frame->linesize);
if (ret <= 0) {
printErr("sws_scale", ret);
break;
}
frame->pts = curPts++;
ret = avcodec_send_frame(codecCtx, frame);
if (ret != 0) {
printErr("avcodec_send_frame", ret);
break;
}
ret = avcodec_receive_packet(codecCtx, pkt);
if (ret != 0) {
if (ret == AVERROR(EAGAIN)) {
continue;
}
printErr("avcodec_send_frame", ret);
break;
}
pkt->pts = av_rescale_q_rnd(pkt->pts, codecCtx->time_base, outCtx->streams[0]->time_base,
(AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt->dts = av_rescale_q_rnd(pkt->dts, codecCtx->time_base, outCtx->streams[0]->time_base,
(AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
av_interleaved_write_frame(outCtx, pkt);
}
std::this_thread::sleep_for(std::chrono::milliseconds(20));
}
avio_close(outCtx->pb);
av_frame_free(&frame);
av_packet_free(&pkt);
avcodec_free_context(&codecCtx);
sws_freeContext(swsCtx);
swsCtx = nullptr;
avformat_free_context(outCtx);
outCtx = nullptr;
return 0;
}