【gstreamer opencv::Mat】Reading every frame of a video as a cv::Mat with GStreamer

Table of Contents

    • Preface
    • Implementation
        • VideoReader.h
        • VideoReader.cc
        • ReaderDemo.cc
        • CMakeLists.txt
    • References

Preface

  • Goal: read every frame of a video, convert it to cv::Mat via GStreamer plugins, and have each call of a read function fetch exactly one frame
  • Analysis: in GStreamer, data flows through a pipeline and ends up in a sink. Sinks include display sinks (videosink), file sinks (filesink), and the application sink (appsink). appsink can pull data out of the pipeline and hand a copy to the surrounding application, i.e. to a cv::Mat variable in our main program (see the sketch after this list)
  • GitHub repo: VideoReaderWriteOfRB5
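
To make the pipeline → appsink idea concrete, here is a minimal, self-contained sketch that builds the same decode chain with gst_parse_launch and pulls a single sample from the appsink. It is a sketch under assumptions: test.mp4 is a placeholder path, and omxh264dec is the hardware decoder on the RB5; on a desktop you would likely substitute avdec_h264.

#include <gst/gst.h>
#include <iostream>

int main(int argc, char* argv[]) {
    gst_init(&argc, &argv);

    // Build the whole decode chain from a launch string; name=sink lets us
    // look the appsink up afterwards. test.mp4 is a placeholder path.
    GError* err = nullptr;
    GstElement* pipeline = gst_parse_launch(
        "filesrc location=test.mp4 ! qtdemux ! queue ! h264parse "
        "! omxh264dec ! appsink name=sink emit-signals=true max-buffers=1",
        &err);
    if (!pipeline) {
        std::cerr << "parse failed: " << err->message << std::endl;
        g_clear_error(&err);
        return -1;
    }

    GstElement* sink = gst_bin_get_by_name(GST_BIN(pipeline), "sink");
    gst_element_set_state(pipeline, GST_STATE_PLAYING);

    // Pull one decoded frame out of the pipeline.
    GstSample* sample = nullptr;
    g_signal_emit_by_name(sink, "pull-sample", &sample);
    if (sample) {
        std::cout << "got one decoded sample" << std::endl;
        gst_sample_unref(sample);
    }

    gst_element_set_state(pipeline, GST_STATE_NULL);
    gst_object_unref(sink);
    gst_object_unref(pipeline);
    return 0;
}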

Implementation

The GStreamer pipeline is shown in the figure below:
(figure: decode pipeline: filesrc → qtdemux → queue → h264parse → omxh264dec → appsink)

The following code has been tested. Run sudo ./VideoReaderDome ./input.mp4 ./ to convert each frame to cv::Mat and save it as a jpg. Without further ado, the code:

VideoReader.h

#ifndef __VIDEO_READER_H__
#define __VIDEO_READER_H__

#include "opencv2/opencv.hpp"
#include <gst/gst.h>

class VideoReader {
public:
    /**
     * @brief Open the video; url is a file path or a network address
     */
    int Open(const std::string &url); 

    /**
     * @brief Read video frames sequentially; timestamp is in seconds
     */ 
    int Read(cv::Mat &frame, double &timestamp);

    /**
     * @brief Video framerate
     * @return numerator/denominator pair; fps = framerate.first / framerate.second
     */
    std::pair<int, int> Framerate() {  
        return framerate_; 
    }
    
    /**
     * @brief The original video width/height must be supplied, because the
     * dimensions reported by gst are padded to alignment (multiples of 16)
     */
    void InputOriginSize(const int width, const int height) {
        width_ = width;
        height_ = height;
    }

    ~VideoReader();

private:
    // int NextFrame(AVFrame *frame);
    int RecvDecodedFrame(cv::Mat& frame, double& timestamp);
    GstElement* pipeline_;
    GstElement* source_;
    GstElement* qtdemux_;
    GstElement* queue_;
    GstElement* h264parse_;
    GstElement* omxh264dec_;
    GstElement* sink_;

    std::string srcFmt_;
    int paddedWidth_ = 0;
    int paddedHeight_ = 0;
    int width_ = 0;
    int height_ = 0;
    std::pair<int, int> framerate_;
};

#endif
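
For example, a 1920×1080 source comes back from the decoder as 1920×1088: 1080 is rounded up to the next multiple of 16 while 1920 is already aligned. A quick sketch of the alignment arithmetic, assuming the multiple-of-16 padding described in the header comment (the exact alignment is decoder specific):

#include <iostream>

// Round n up to the next multiple of 16, mirroring the decoder's padding.
static int AlignUp16(int n) {
    return (n + 15) / 16 * 16;
}

int main() {
    std::cout << AlignUp16(1920) << "x" << AlignUp16(1080) << std::endl;  // 1920x1088
    return 0;
}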

VideoReader.cc

#include "VideoReader.h"
#include <iostream>

// qtdemux creates its source pads dynamically, so the new pad is linked to the
// queue from this "pad-added" callback.
static inline void QtdemuxPadAddedCb(GstElement *qtdemux, GstPad *pad, GstElement *queue) {
    gst_element_link_pads(qtdemux, GST_PAD_NAME(pad), queue, nullptr);
}

// Error/EOS handler. It blocks the calling thread, so we don't use it.
static inline void ErrHandle(GstElement *pipeline) {
    // Wait until error or EOS
    auto bus = gst_element_get_bus(pipeline);
    auto msg = gst_bus_timed_pop_filtered(bus, GST_CLOCK_TIME_NONE, (GstMessageType)(GST_MESSAGE_ERROR | GST_MESSAGE_EOS));

    // Message handling
    if (msg != nullptr) {
        switch (GST_MESSAGE_TYPE(msg)) {
            case GST_MESSAGE_ERROR: {
                GError *err = nullptr;
                gchar *debug_info = nullptr;
                gst_message_parse_error(msg, &err, &debug_info);
                std::cerr<< "Error received:" << err->message << std::endl;
                if (debug_info) {
                    std::cerr<< "Debugging information:" << debug_info << std::endl;
                }
                g_clear_error(&err);
                g_free(debug_info);
            }
                break;
            case GST_MESSAGE_EOS:
                std::cout << "End-Of-Stream reached" << std::endl;
                break;
            default:
                std::cout << "Unexpected message received" << std::endl;
                break;
        }
        gst_message_unref(msg);
    }

    // Free resources
    gst_object_unref(bus);
}
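
If you still want to notice errors without blocking, one option is to poll the bus with a zero timeout, which returns immediately when no message is queued. A minimal sketch (call it periodically, e.g. once per frame):

// Non-blocking check for pending ERROR/EOS messages on the pipeline's bus.
static bool PollBusOnce(GstElement *pipeline) {
    auto bus = gst_element_get_bus(pipeline);
    auto msg = gst_bus_timed_pop_filtered(
        bus, 0, (GstMessageType)(GST_MESSAGE_ERROR | GST_MESSAGE_EOS));
    bool gotMessage = (msg != nullptr);
    if (msg) {
        gst_message_unref(msg);  // a real handler would parse it first
    }
    gst_object_unref(bus);
    return gotMessage;
}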

// Core job: copy the data flowing through the pipeline into a cv::Mat
int VideoReader::RecvDecodedFrame(cv::Mat& frame, double& timestamp) {
    GstSample *sample;
    // Pull a video frame with pull-sample and map it; the frame data is then copied out of the mapping
    g_signal_emit_by_name(sink_, "pull-sample", &sample);
    if (sample) {
        auto buffer = gst_sample_get_buffer(sample);

        // fetch timestamp
        timestamp = static_cast<double>(GST_BUFFER_PTS(buffer)) / static_cast<double>(GST_SECOND);
        // std::cout << "timestamp:" << timestamp << std::endl;

        // copy buffer data into cv::Mat
        GstMapInfo map;
        if (gst_buffer_map(buffer, &map, GST_MAP_READ)) {
            std::cout << "recv data size:" << map.size << std::endl;
            if (srcFmt_ != "NV12" && srcFmt_ != "I420") {
                std::cout << "unsupported src pixel format" << std::endl;
                gst_buffer_unmap(buffer, &map);  // don't leak the mapping
                gst_sample_unref(sample);        // ...or the sample
                return -1;
            }
            static cv::Mat image;
            if (image.empty()) {
                image.create(cv::Size(width_, height_ * 3 / 2), CV_8UC1);
            }
            // 1. copy into cv::Mat
            if (paddedWidth_ == width_) {
                memcpy(image.data, map.data, width_ * sizeof(uint8_t) * height_);
                memcpy(image.data + height_ * width_, map.data + paddedHeight_ * width_, width_ * sizeof(uint8_t) * height_ / 2);
            } else {
                // copy Y-channel row by row, skipping the padded stride
                for (int i = 0; i < height_; ++i) {
                    memcpy(image.data + i * width_, map.data + i * paddedWidth_, width_ * sizeof(uint8_t));
                }
                // copy UV-channel row by row, skipping the padded stride
                for (int i = 0; i < height_ / 2; ++i) {
                    memcpy(image.data + (height_ + i) * width_, map.data + (paddedHeight_ + i) * paddedWidth_, width_ * sizeof(uint8_t));
                }
            }

            // 2. convert format
            if (srcFmt_ == "NV12") {
                cv::cvtColor(image, frame, cv::COLOR_YUV2BGR_NV12);
            } else {
                cv::cvtColor(image, frame, cv::COLOR_YUV2BGR_I420);
            }

            // release buffer mapping
            gst_buffer_unmap(buffer, &map);
        }
        // release sample reference
        gst_sample_unref(sample);
        return 0;
    } else {
      std::cerr << "recv null frame"  << std::endl;
      return -1;
    }
}
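
As an aside, the per-row memcpy can be avoided for NV12 by wrapping the mapped buffer in a cv::Mat whose row stride is the padded width, converting the whole padded image, and cropping afterwards. A sketch (NV12 only: I420 stores half-width chroma planes, so this trick does not apply to it):

#include <opencv2/opencv.hpp>

// Wrap the padded, mapped decoder buffer without copying, convert the whole
// padded image to BGR, then crop off the alignment padding.
static cv::Mat Nv12PaddedToBgr(const uint8_t* data, int paddedWidth,
                               int paddedHeight, int width, int height) {
    cv::Mat paddedYuv(paddedHeight * 3 / 2, paddedWidth, CV_8UC1,
                      const_cast<uint8_t*>(data), paddedWidth);
    cv::Mat paddedBgr;
    cv::cvtColor(paddedYuv, paddedBgr, cv::COLOR_YUV2BGR_NV12);
    return paddedBgr(cv::Rect(0, 0, width, height)).clone();
}

Inside RecvDecodedFrame this would be called as Nv12PaddedToBgr(map.data, paddedWidth_, paddedHeight_, width_, height_) in place of steps 1 and 2 above.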

int VideoReader::Open(const std::string& url) {
    // create the elements
    source_     = gst_element_factory_make("filesrc", "InputFile");
    qtdemux_    = gst_element_factory_make("qtdemux", "QtDemux");
    queue_      = gst_element_factory_make("queue", "QueueReader");
    h264parse_  = gst_element_factory_make("h264parse", "H264Parse");
    omxh264dec_ = gst_element_factory_make("omxh264dec", "OmxH264Dec");
    sink_       = gst_element_factory_make("appsink", "CustomSink");

    pipeline_ = gst_pipeline_new("decode-pipeline");

    if (!pipeline_ || !source_ || !qtdemux_ || !queue_ || !omxh264dec_ || !h264parse_ || !sink_) {
        std::cerr<< "Not all elements could be created" << std::endl;
        return -1;
    }
    // Modify element properties
    g_object_set(G_OBJECT(source_), "location", url.c_str(), nullptr);
    g_object_set(G_OBJECT(sink_), "emit-signals", TRUE, "max-buffers", 1, nullptr);

    // Build the pipeline
    gst_bin_add_many(GST_BIN(pipeline_), source_, qtdemux_, queue_, h264parse_, omxh264dec_, sink_, nullptr);

    if (gst_element_link(source_, qtdemux_) != TRUE ) {
        std::cerr<< "source and qtdemux could not be linked"  << std::endl;
        // gst_object_unref(pipeline_);
        return -1;
    }
    if (gst_element_link_many(queue_, h264parse_, omxh264dec_, sink_, nullptr) != TRUE ) {
        std::cerr<< "queue, h264parse, omxh264dec, and sink could not be linked"  << std::endl;
        // gst_object_unref(pipeline);
        return -1;
    }
    // dynamic padded connect between demux and queue
    g_signal_connect(qtdemux_, "pad-added", (GCallback)QtdemuxPadAddedCb, queue_);

    GstStateChangeReturn ret = gst_element_set_state(pipeline_, GST_STATE_PLAYING);
    if (ret == GST_STATE_CHANGE_FAILURE) {
        std::cerr<< "Unable to set the pipeline to the paused state" << std::endl;
        return -1;
    }
    GstSample *sample;
    g_signal_emit_by_name(sink_, "pull-preroll", &sample);
    if (sample) {
        //std::cout << "recv frame" << std::endl;
        auto buffer = gst_sample_get_buffer(sample);
        // fetch video information from the negotiated caps
        if (paddedHeight_ == 0 && paddedWidth_ == 0) {
            GstCaps *caps = gst_sample_get_caps(sample);
            GstStructure* info = gst_caps_get_structure(caps, 0);
            gst_structure_get_int(info, "width", &paddedWidth_);
            gst_structure_get_int(info, "height", &paddedHeight_);
            const char* format = gst_structure_get_string(info, "format");
            gst_structure_get_fraction(info, "framerate", &framerate_.first, &framerate_.second);
            srcFmt_ = format;

            std::cout << "padded width:" << paddedWidth_ << "padded height:" << paddedHeight_ << std::endl;
            std::cout << "format:" << srcFmt_ << std::endl;
            std::cout << "framerate num:" << framerate_.first << "framerate den:" << framerate_.second << std::endl;
        }
        // release sample reference
        gst_sample_unref(sample);
    }

    // set pipeline to playing
    ret = gst_element_set_state(pipeline_, GST_STATE_PLAYING);
    if (ret == GST_STATE_CHANGE_FAILURE) {
        std::cerr<< "Unable to set the pipeline to the playing state" << std::endl;
        return -1;
    }

    // Handle error or EOS. Attention: ErrHandle blocks, so we don't call it.
    // ErrHandle(pipeline_);
    return 0;
}

int VideoReader::Read(cv::Mat &frame, double &timestamp) {
    return RecvDecodedFrame(frame, timestamp);
}
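
Note that pull-sample returns NULL both at end-of-stream and on error, so Read() cannot distinguish the two. If you need to, one option is to ask the appsink directly. A sketch, assuming you also include gst/app/gstappsink.h and link against gstreamer-app-1.0:

#include <gst/app/gstappsink.h>

// Returns true once the appsink has reached end-of-stream.
static bool ReachedEos(GstElement* sink) {
    return gst_app_sink_is_eos(GST_APP_SINK(sink)) == TRUE;
}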

VideoReader::~VideoReader() {
    if (pipeline_) {
        gst_element_set_state(pipeline_, GST_STATE_NULL);
        gst_object_unref(pipeline_);
        pipeline_ = nullptr;
    }
}

ReaderDemo.cc

#include <iostream>
#include "VideoReader.h"
#include <gst/gst.h>

void TestVideoReader(std::string url, std::string outUrl, int count) {
    std::cout << "video:" << url << std::endl;

    VideoReader video;
    // The original video dimensions must be supplied
    video.InputOriginSize(1920,1080);

    auto ret = video.Open(url);
    if (ret < 0) return;

    cv::Mat frame;
    int seq = 0;
    double timestamp = .0;

    while (seq++ < count) {
        std::cout << "reading " << seq << "th loop." << std::endl;

        auto ret = video.Read(frame, timestamp);
        if (ret < 0) break;

        std::string filename = outUrl + "/" + std::to_string(seq) + ".jpg";
        cv::imwrite(filename, frame);
    }

    std::cout << "video read over" << std::endl;
}


int main(int argc, char* argv[]) {
    gst_init(&argc, &argv);

    if (argc < 3) {
        std::cerr << "usage: " << argv[0] << " <input video> <output dir>" << std::endl;
        return -1;
    }
    std::string inputUrl(argv[1]);
    std::string outputUrl(argv[2]);

    std::cout << "read video:" << inputUrl << std::endl;

    TestVideoReader(inputUrl, outputUrl, 50);

    return 0;
}

CMakeLists.txt

For setting up the OpenCV build, see the companion post on configuring OpenCV with CMake.

cmake_minimum_required(VERSION 3.6)
PROJECT(sample)

find_package(OpenCV REQUIRED)
# include(/home/jungle/smm/dependencies/opencv/qualcomm/lib/cmake/opencv4/OpenCVConfig.cmake)

add_executable(VideoReaderDome ReaderDemo.cc VideoReader.cc VideoReader.h)

target_link_libraries(VideoReaderDome PUBLIC ${OpenCV_LIBS})
target_link_libraries(VideoReaderDome PUBLIC "-lgstreamer-1.0 -lgobject-2.0 -lglib-2.0")
target_include_directories(VideoReaderDome PUBLIC /usr/include/gstreamer-1.0)
target_include_directories(VideoReaderDome PUBLIC /usr/include/gobject-2.0)
target_include_directories(VideoReaderDome PUBLIC /usr/include/glib-2.0)
target_include_directories(VideoReaderDome PUBLIC ${OpenCV_INCLUDE_DIRS})
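
The hard-coded /usr/include paths above work on the test device but are fragile. A more portable sketch using CMake's pkg-config support (assuming pkg-config and the gstreamer-1.0 development files are installed):

find_package(PkgConfig REQUIRED)
pkg_check_modules(GST REQUIRED gstreamer-1.0)

target_include_directories(VideoReaderDome PUBLIC ${GST_INCLUDE_DIRS})
target_link_libraries(VideoReaderDome PUBLIC ${GST_LIBRARIES})

pkg_check_modules resolves the glib-2.0 and gobject-2.0 dependencies transitively, so the manual -l flags and include paths become unnecessary.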

References

【Converting data between GStreamer appsink/appsrc and cv::Mat】reference
