ffmpeg视频解码后得到的图像经常需要用Opencv进行一些图像处理,两者之间需要互相转化,ffmpeg解码后的数据类型是AVFrame,而Opencv的图像数据结构是Mat,这就需要做个转化。
AVFrame 转 Mat
// Convert a decoded FFmpeg AVFrame (any source pixel format) to an OpenCV BGR Mat.
//
// @param avframe  decoded source frame (must be non-null with valid width/height/format)
// @param width    target width;  pass <= 0 to keep the source width (no resize)
// @param height   target height; pass <= 0 to keep the source height (no resize)
// @return         CV_8UC3 BGR Mat of size (width x height); empty Mat if the
//                 conversion context cannot be created (e.g. unsupported format)
Mat AVFrameToMat(AVFrame *avframe, int width, int height)
{
    // Non-positive dimensions mean "same size as the source" (no resize).
    if (width <= 0)
        width = avframe->width;
    if (height <= 0)
        height = avframe->height;

    struct SwsContext *sws_ctx = sws_getContext(
        avframe->width, avframe->height, (enum AVPixelFormat)avframe->format,
        width, height, AV_PIX_FMT_BGR24,
        SWS_BICUBIC, NULL, NULL, NULL);
    if (sws_ctx == NULL)
    {
        // sws_getContext fails for invalid/unsupported formats; don't crash in sws_scale.
        return cv::Mat();
    }

    cv::Mat mat;
    mat.create(cv::Size(width, height), CV_8UC3); // BGR24: 3 bytes per pixel

    // Wrap the Mat's buffer in a temporary AVFrame so sws_scale writes
    // directly into the Mat — no extra copy. The frame does NOT own the data.
    AVFrame *bgr24frame = av_frame_alloc();
    avpicture_fill((AVPicture *)bgr24frame, (uint8_t *)mat.data,
                   AV_PIX_FMT_BGR24, width, height);

    sws_scale(sws_ctx,
              (const uint8_t *const *)avframe->data, avframe->linesize,
              0, avframe->height, // process all rows of the source
              bgr24frame->data, bgr24frame->linesize);

    // bgr24frame does not own mat.data, so freeing the frame struct is safe.
    // av_frame_free is the documented counterpart of av_frame_alloc.
    av_frame_free(&bgr24frame);
    sws_freeContext(sws_ctx);
    return mat;
}
通过 width、height 参数可以在转换的同时对图像进行缩放(resize);若传入 0 或负值则不缩放,输出与原图像大小一致。
Mat 转 AVFrame
// Convert an OpenCV BGR Mat to a newly allocated YUV420P AVFrame.
//
// The frame owns its own pixel buffers (allocated via av_frame_get_buffer),
// so it remains valid after this function returns and after `mat` is
// destroyed. Caller must release it with av_frame_free().
//
// NOTE: I420 requires even width/height; odd dimensions should be padded
// by the caller before conversion.
//
// @param mat  CV_8UC3 BGR image; if empty, an unconfigured frame is returned
// @return     allocated AVFrame (NULL only if av_frame_alloc itself fails)
AVFrame *MatToAVFrame(Mat mat)
{
    AVFrame *avframe = av_frame_alloc();
    if (avframe && !mat.empty())
    {
        avframe->format = AV_PIX_FMT_YUV420P;
        avframe->width = mat.cols;
        avframe->height = mat.rows;
        if (av_frame_get_buffer(avframe, 0) < 0)
        {
            // Buffer allocation failed; return the (empty) frame rather than crash.
            return avframe;
        }
        av_frame_make_writable(avframe);

        // BGR -> planar I420 (Y plane, then quarter-size U and V planes,
        // packed contiguously in yuv.data).
        cv::Mat yuv;
        cv::cvtColor(mat, yuv, cv::COLOR_BGR2YUV_I420);

        const int luma_w = mat.cols;
        const int luma_h = mat.rows;
        const int chroma_w = luma_w / 2;
        const int chroma_h = luma_h / 2;
        const unsigned char *src_y = yuv.data;
        const unsigned char *src_u = src_y + luma_w * luma_h;
        const unsigned char *src_v = src_u + chroma_w * chroma_h;

        // Copy plane data into the frame's OWN buffers, row by row, honoring
        // linesize (planes may be padded). The original code instead pointed
        // avframe->data[] into the local `yuv` Mat, which is destroyed on
        // return — a dangling pointer — while leaking the allocated buffers.
        for (int row = 0; row < luma_h; row++)
            memcpy(avframe->data[0] + row * avframe->linesize[0],
                   src_y + row * luma_w, luma_w);
        for (int row = 0; row < chroma_h; row++)
        {
            memcpy(avframe->data[1] + row * avframe->linesize[1],
                   src_u + row * chroma_w, chroma_w);
            memcpy(avframe->data[2] + row * avframe->linesize[2],
                   src_v + row * chroma_w, chroma_w);
        }
    }
    return avframe;
}