At https://blog.csdn.net/fengbingchun/article/details/93975325, test code was given that captures a video stream from a camera, then decodes and displays it, using the old, now deprecated FFmpeg interfaces. Here the same functionality is implemented again using FFmpeg's new interfaces. The main interface functions involved are:
1. avdevice_register_all: initializes libavdevice and registers all input and output devices;
2. av_find_input_format: finds an AVInputFormat by the short name of the input format. In the test code, "vfwcap" (Video for Windows capture) is used on the Windows platform and "video4linux2" (Video4Linux2) on the Linux platform;
3. avformat_alloc_context: allocates an AVFormatContext;
4. av_dict_set: sets or overwrites an entry in an AVDictionary. The test code sets video_size to 640x480 and input_format to mjpeg; these two settings only take effect for a USB camera and will crash with the built-in camera on Windows;
5. avformat_open_input: opens the input stream and reads its header;
6. avformat_find_stream_info: reads packets of the media to obtain stream information;
7. the video/audio stream index is found by walking the AVStreams in the AVFormatContext. On Windows 10 the codec type obtained here is mjpeg (AV_CODEC_ID_MJPEG) and the pixel format is yuvj422p (AV_PIX_FMT_YUVJ422P); on Linux or Windows 7 the original codec type is rawvideo (AV_CODEC_ID_RAWVIDEO) and the original pixel format is yuyv422 (AV_PIX_FMT_YUYV422). Because of the av_dict_set calls above, the codec type is changed from rawvideo to mjpeg, which shows that the default configuration of a USB camera can be adjusted through av_dict_set;
8. avcodec_find_decoder: finds a registered decoder with the matching codec ID;
9. avcodec_alloc_context3: allocates an AVCodecContext and sets its fields to default values;
10. avcodec_open2: initializes the AVCodecContext. Since avcodec_alloc_context3 was used above, some fields of the AVCodecContext, such as width, height and pixel format, must be assigned before calling avcodec_open2; thread_count specifies how many threads are used for decoding;
11. av_frame_alloc: allocates an AVFrame and sets its fields to default values;
12. av_malloc: allocates a memory block for an AVPacket;
13. sws_getContext: allocates an SwsContext;
14. av_image_alloc: allocates a buffer for an image with the specified width, height and pixel format;
15. av_read_frame: reads the next packet (AVPacket) from the stream;
16. avcodec_send_packet: supplies raw packet data as input to the decoder;
17. avcodec_receive_frame: returns decoded data from the decoder (see the sketch after this list for the general send/receive loop);
18. sws_scale: converts the image format;
19. av_packet_unref: unreferences the data buffered in the AVPacket;
20. av_frame_free: frees the AVFrame allocated by av_frame_alloc;
21. sws_freeContext: frees the SwsContext allocated by sws_getContext;
22. av_freep: frees the AVPacket allocated by av_malloc;
23. avformat_close_input: closes the opened AVFormatContext and frees it;
24. av_dict_free: frees the AVDictionary allocated by av_dict_set;
25. av_freep: frees the buffer allocated by av_image_alloc.
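Items 16 and 17 are the core of the new decoding API. The test code below calls avcodec_receive_frame only once per packet, which is enough for a camera stream where one packet normally yields one frame; in the general case the decoder should be drained in a loop until it returns AVERROR(EAGAIN) or AVERROR_EOF. The following is only a minimal sketch of such a loop (it is not part of the test code, and assumes a codec_ctx/packet/frame prepared the same way as below):
#ifdef __cplusplus
extern "C" {
#endif
#include <libavcodec/avcodec.h>
#ifdef __cplusplus
}
#endif

// generic send/receive decode loop: feed one packet, then drain every frame it produces
static int decode_packet(AVCodecContext* codec_ctx, AVPacket* packet, AVFrame* frame)
{
    int ret = avcodec_send_packet(codec_ctx, packet); // a nullptr packet puts the decoder into flushing mode
    if (ret < 0) return ret;
    while (ret >= 0) {
        ret = avcodec_receive_frame(codec_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) return 0; // needs more input, or fully flushed
        if (ret < 0) return ret; // real decoding error
        // ... use frame here, e.g. sws_scale and display ...
        av_frame_unref(frame); // drop the reference before the next receive
    }
    return 0;
}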
Test code (test_ffmpeg_decode_show.cpp):
#include "funset.hpp"
#include <cstdio>
#include <cstdint>
#ifdef __cplusplus
extern "C" {
#endif
#include <libavdevice/avdevice.h>
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/imgutils.h>
#include <libavutil/dict.h>
#include <libswscale/swscale.h>
#ifdef __cplusplus
}
#endif
#include <opencv2/opencv.hpp>
int test_ffmpeg_decode_show_new()
{
avdevice_register_all();
AVDictionary* options = nullptr;
#ifdef _MSC_VER
const char* input_format_name = "vfwcap";
const char* url = "";
#else
const char* input_format_name = "video4linux2";
const char* url = "/dev/video0";
av_dict_set(&options, "video_size", "640x480", 0);
av_dict_set(&options, "input_format", "mjpeg", 0);
#endif
AVInputFormat* input_fmt = av_find_input_format(input_format_name);
AVFormatContext* format_ctx = avformat_alloc_context();
int ret = avformat_open_input(&format_ctx, url, input_fmt, &options);
if (ret != 0) {
fprintf(stderr, "fail to open url: %s, return value: %d\n", url, ret);
return -1;
}
ret = avformat_find_stream_info(format_ctx, nullptr);
if (ret < 0) {
fprintf(stderr, "fail to get stream information: %d\n", ret);
return -1;
}
int video_stream_index = -1;
for (unsigned int i = 0; i < format_ctx->nb_streams; ++i) {
const AVStream* stream = format_ctx->streams[i];
if (stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
video_stream_index = i;
fprintf(stdout, "type of the encoded data: %d, dimensions of the video frame in pixels: width: %d, height: %d, pixel format: %d\n",
stream->codecpar->codec_id, stream->codecpar->width, stream->codecpar->height, stream->codecpar->format);
}
}
if (video_stream_index == -1) {
fprintf(stderr, "no video stream\n");
return -1;
}
AVCodecParameters* codecpar = format_ctx->streams[video_stream_index]->codecpar;
const AVCodec* codec = avcodec_find_decoder(codecpar->codec_id);
if (!codec) {
fprintf(stderr, "fail to avcodec_find_decoder\n");
return -1;
}
AVCodecContext* codec_ctx = avcodec_alloc_context3(codec);
if (!codec_ctx) {
fprintf(stderr, "fail to avcodec_alloc_context3\n");
return -1;
}
codec_ctx->pix_fmt = AVPixelFormat(codecpar->format);
codec_ctx->height = codecpar->height;
codec_ctx->width = codecpar->width;
codec_ctx->thread_count = 4;
ret = avcodec_open2(codec_ctx, codec, nullptr);
if (ret != 0) {
fprintf(stderr, "fail to avcodec_open2: %d\n", ret);
return -1;
}
AVFrame* frame = av_frame_alloc();
AVPacket* packet = (AVPacket*)av_malloc(sizeof(AVPacket));
SwsContext* sws_ctx = sws_getContext(codec_ctx->width, codec_ctx->height, codec_ctx->pix_fmt, codec_ctx->width, codec_ctx->height, AV_PIX_FMT_BGR24, 0, nullptr, nullptr, nullptr);
if (!frame || !packet || !sws_ctx) {
fprintf(stderr, "fail to alloc\n");
return -1;
}
uint8_t* bgr_data[4];
int bgr_linesize[4];
av_image_alloc(bgr_data, bgr_linesize, codec_ctx->width, codec_ctx->height, AV_PIX_FMT_BGR24, 1);
cv::Mat mat(codec_ctx->height, codec_ctx->width, CV_8UC3);
const char* winname = "usb video2";
cv::namedWindow(winname);
while (1) {
ret = av_read_frame(format_ctx, packet);
if (ret >= 0 && packet->stream_index == video_stream_index) {
ret = avcodec_send_packet(codec_ctx, packet);
if (ret < 0) {
fprintf(stderr, "fail to avcodec_send_packet: %d\n", ret);
av_packet_unref(packet);
continue;
}
ret = avcodec_receive_frame(codec_ctx, frame);
if (ret < 0) {
fprintf(stderr, "fail to avcodec_receive_frame\n");
av_packet_unref(packet);
continue;
}
sws_scale(sws_ctx, frame->data, frame->linesize, 0, codec_ctx->height, bgr_data, bgr_linesize);
mat.data = bgr_data[0];
cv::imshow(winname, mat);
}
av_packet_unref(packet);
int key = cv::waitKey(25);
if (key == 27) break;
}
cv::destroyWindow(winname);
av_frame_free(&frame);
sws_freeContext(sws_ctx);
avcodec_free_context(&codec_ctx);
av_dict_free(&options);
avformat_close_input(&format_ctx);
av_freep(&packet);
av_freep(&bgr_data[0]);
fprintf(stdout, "test finish\n");
return 0;
}
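One note on the packet allocation: to match items 12 and 22 in the list above, the test code allocates the AVPacket with av_malloc and releases it with av_freep. With the current API it is usually simpler to let libavcodec manage the packet itself; a minimal alternative sketch (assuming FFmpeg 3.x or later) is:
#ifdef __cplusplus
extern "C" {
#endif
#include <libavcodec/avcodec.h>
#ifdef __cplusplus
}
#endif

int test_packet_alloc()
{
    // av_packet_alloc sets all fields to default values, so no separate initialization is needed
    AVPacket* packet = av_packet_alloc();
    if (!packet) return -1;
    // ... av_read_frame / avcodec_send_packet / av_packet_unref as in the test code above ...
    av_packet_free(&packet); // unreferences any remaining data and frees the packet itself
    return 0;
}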
The execution result is as follows:
GitHub: https://github.com/fengbingchun/OpenCV_Test