OpenCV 3.1.0 optical flow and foreground extraction
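
This post is a small OpenCV 3.1.0 + CUDA demo split across five files: bgfg_segm.h/.cpp wrap the GPU background-subtraction classes (MOG, MOG2, GMG and FGD) behind a single foregroundExtraction() call; optical_flow.h/.cpp wrap the dense GPU optical-flow methods (Brox, dense pyramidal LK, Farneback and Dual TV-L1) and draw the flow field with the usual color-wheel visualization; main.cpp reads a video, runs one optical-flow method on consecutive grayscale frames and one background-subtraction method on each color frame.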

//bgfg_segm.h

#ifndef BGFG_SEGM_H
#define BGFG_SEGM_H

#include "opencv2/core.hpp"
#include "opencv2/core/utility.hpp"

using namespace cv;

enum Method
{
	MOG,
	MOG2,
	GMG,
	FGD_STAT
};

extern void foregroundExtraction(const Mat &frame, Method m);

#endif


//bgfg_segm.cpp

#include <iostream>
#include <cassert>
#include <cstdlib>

#include "bgfg_segm.h"
#include "opencv2/cudabgsegm.hpp"
#include "opencv2/cudalegacy.hpp"
#include "opencv2/video.hpp"
#include "opencv2/highgui.hpp"

using namespace std;
using namespace cv::cuda;

static void fgd_Stat(const Mat &frame)
{
	const char *imageWindow = "image";
	const char *foregroundMaskWindow = "foreground mask";
	const char *foregroundImageWindow = "foreground image";
	const char *meanBackgroundImage = "mean background image";

	static bool isFirstFrame = true;
	static GpuMat d_frame;
	static Ptr<cuda::BackgroundSubtractorFGD> fgd = cuda::createBackgroundSubtractorFGD();

	static GpuMat d_fgmask;
	static GpuMat d_bgimg;

	static Mat fgmask;
	static Mat fgimg;
	static Mat bgimg;

	d_frame.upload(frame);
	static GpuMat d_fgimg(d_frame.size(), d_frame.type());

	if (isFirstFrame)
	{
		fgd->apply(d_frame, d_fgmask);
		isFirstFrame = false;
	}
	else
	{
		int64 start = cv::getTickCount();

		fgd->apply(d_frame, d_fgmask);
		fgd->getBackgroundImage(d_bgimg);

		double fps = cv::getTickFrequency() / (cv::getTickCount() - start);
		std::cout << "FPS : " << fps << std::endl;

		d_fgimg.setTo(Scalar::all(0));
		d_frame.copyTo(d_fgimg, d_fgmask);

		d_fgmask.download(fgmask);
		d_fgimg.download(fgimg);
		d_bgimg.download(bgimg);

		imshow(imageWindow, frame);
		imshow(foregroundMaskWindow, fgmask);
		imshow(foregroundImageWindow, fgimg);
		imshow(meanBackgroundImage, bgimg);
	}
}

static void mog(const Mat &frame)
{
	const char *imageWindow = "image";
	const char *foregroundMaskWindow = "foreground mask";
	const char *foregroundImageWindow = "foreground image";
	const char *meanBackgroundImage = "mean background image";

	static bool isFirstFrame = true;
	static GpuMat d_frame;
	static Ptr<cuda::BackgroundSubtractorMOG> mog = cuda::createBackgroundSubtractorMOG();

	static GpuMat d_fgmask;
	static GpuMat d_bgimg;

	static Mat fgmask;
	static Mat fgimg;
	static Mat bgimg;

	d_frame.upload(frame);
	static GpuMat d_fgimg(d_frame.size(), d_frame.type());

	if (isFirstFrame)
	{
		mog->apply(d_frame, d_fgmask, 0.01);
		isFirstFrame = false;
	}
	else
	{
		int64 start = cv::getTickCount();

		mog->apply(d_frame, d_fgmask, 0.01);
		mog->getBackgroundImage(d_bgimg);

		double fps = cv::getTickFrequency() / (cv::getTickCount() - start);
		std::cout << "FPS : " << fps << std::endl;

		d_fgimg.setTo(Scalar::all(0));
		d_frame.copyTo(d_fgimg, d_fgmask);

		d_fgmask.download(fgmask);
		d_fgimg.download(fgimg);
		d_bgimg.download(bgimg);

		imshow(imageWindow, frame);
		imshow(foregroundMaskWindow, fgmask);
		imshow(foregroundImageWindow, fgimg);
		imshow(meanBackgroundImage, bgimg);
	}
}

static void mog2(const Mat &frame)
{
	const char *imageWindow = "image";
	const char *foregroundMaskWindow = "foreground mask";
	const char *foregroundImageWindow = "foreground image";
	const char *meanBackgroundImage = "mean background image";

	static bool isFirstFrame = true;
	static GpuMat d_frame;
	static Ptr<cuda::BackgroundSubtractorMOG2> mog2 = cuda::createBackgroundSubtractorMOG2();

	static GpuMat d_fgmask;
	static GpuMat d_bgimg;

	static Mat fgmask;
	static Mat fgimg;
	static Mat bgimg;

	d_frame.upload(frame);
	static GpuMat d_fgimg(d_frame.size(), d_frame.type());

	if (isFirstFrame)
	{
		mog2->apply(d_frame, d_fgmask);
		isFirstFrame = false;
	}
	else
	{
		int64 start = cv::getTickCount();

		mog2->apply(d_frame, d_fgmask);
		mog2->getBackgroundImage(d_bgimg);

		double fps = cv::getTickFrequency() / (cv::getTickCount() - start);
		std::cout << "FPS : " << fps << std::endl;

		d_fgimg.setTo(Scalar::all(0));
		d_frame.copyTo(d_fgimg, d_fgmask);

		d_fgmask.download(fgmask);
		d_fgimg.download(fgimg);
		d_bgimg.download(bgimg);

		imshow(imageWindow, frame);
		imshow(foregroundMaskWindow, fgmask);
		imshow(foregroundImageWindow, fgimg);
		imshow(meanBackgroundImage, bgimg);
	}
}

static void gmg(const Mat &frame)
{
	const char *imageWindow = "image";
	const char *foregroundMaskWindow = "foreground mask";
	const char *foregroundImageWindow = "foreground image";

	static bool isFirstFrame = true;
	static GpuMat d_frame;
	static Ptr<cuda::BackgroundSubtractorGMG> gmg = cuda::createBackgroundSubtractorGMG(40);

	static GpuMat d_fgmask;

	static Mat fgmask;
	static Mat fgimg;

	d_frame.upload(frame);
	static GpuMat d_fgimg(d_frame.size(), d_frame.type());

	if (isFirstFrame)
	{
		gmg->apply(d_frame, d_fgmask);
		isFirstFrame = false;
	}
	else
	{
		int64 start = cv::getTickCount();
		gmg->apply(d_frame, d_fgmask);
		double fps = cv::getTickFrequency() / (cv::getTickCount() - start);
		std::cout << "FPS : " << fps << std::endl;

		d_fgimg.setTo(Scalar::all(0));
		d_frame.copyTo(d_fgimg, d_fgmask);

		d_fgmask.download(fgmask);
		d_fgimg.download(fgimg);

		imshow(imageWindow, frame);
		imshow(foregroundMaskWindow, fgmask);
		imshow(foregroundImageWindow, fgimg);
	}
}

void foregroundExtraction(const Mat &frame, Method m)
{
	assert(m >= MOG && m <= FGD_STAT);

	switch (m)
	{
	case FGD_STAT:
		fgd_Stat(frame);
		break;

	case MOG:
		mog(frame);
		break;

	case MOG2:
		mog2(frame);
		break;

	case GMG:
		gmg(frame);
		break;

	default:
		cout << "Unknown method, exiting." << endl;
		exit(1);
		break;
	}
}
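
If no CUDA-capable GPU is available, roughly the same pipeline can be run on the CPU with the standard cv::BackgroundSubtractorMOG2 from opencv2/video.hpp. Below is a minimal, self-contained sketch that is not part of the project above; the input file name and window titles are placeholders.

//cpu_bgfg_example.cpp (illustrative sketch only)

#include <opencv2/core.hpp>
#include <opencv2/video.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>

int main()
{
	cv::VideoCapture cap("input.avi");		// placeholder input file
	if (!cap.isOpened()) return 1;

	cv::Ptr<cv::BackgroundSubtractorMOG2> mog2 = cv::createBackgroundSubtractorMOG2();

	cv::Mat frame, fgmask, fgimg;
	while (cap.read(frame))
	{
		mog2->apply(frame, fgmask);							// update the model, get the foreground mask
		fgimg = cv::Mat::zeros(frame.size(), frame.type());
		frame.copyTo(fgimg, fgmask);						// keep only foreground pixels

		cv::imshow("foreground mask", fgmask);
		cv::imshow("foreground image", fgimg);
		if (cv::waitKey(1) == 27) break;					// ESC quits
	}
	return 0;
}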

//optical_flow.h

#ifndef OPTICAL_FLOW_H
#define OPTICAL_FLOW_H

#include "opencv2/core.hpp"

using namespace cv;

extern const char *optical_Flow_Name[];
extern Mat opticalFlowOut;

extern void optical_flow(const Mat &frame0, const Mat &frame1, const char *methodName);

#endif

//optical_flow.cpp

#include <iostream>
#include <cstring>

#include "optical_flow.h"
#include <cassert>
#include <cmath>
#include "opencv2/highgui.hpp"
#include "opencv2/cudaoptflow.hpp"
#include "opencv2/cudaarithm.hpp"

using namespace std;
using namespace cv::cuda;

Mat opticalFlowOut;					// optical flow visualization output, 3-channel (CV_8UC3)

inline bool isFlowCorrect(Point2f u)
{
	return !cvIsNaN(u.x) && !cvIsNaN(u.y) && fabs(u.x) < 1e9 && fabs(u.y) < 1e9;
}

static Vec3b computeColor(float fx, float fy)
{
	static bool first = true;

	// relative lengths of color transitions:
	// these are chosen based on perceptual similarity
	// (e.g. one can distinguish more shades between red and yellow
	//  than between yellow and green)
	const int RY = 15;
	const int YG = 6;
	const int GC = 4;
	const int CB = 11;
	const int BM = 13;
	const int MR = 6;
	const int NCOLS = RY + YG + GC + CB + BM + MR;
	static Vec3i colorWheel[NCOLS];

	if (first)
	{
		int k = 0;

		for (int i = 0; i < RY; ++i, ++k)
			colorWheel[k] = Vec3i(255, 255 * i / RY, 0);

		for (int i = 0; i < YG; ++i, ++k)
			colorWheel[k] = Vec3i(255 - 255 * i / YG, 255, 0);

		for (int i = 0; i < GC; ++i, ++k)
			colorWheel[k] = Vec3i(0, 255, 255 * i / GC);

		for (int i = 0; i < CB; ++i, ++k)
			colorWheel[k] = Vec3i(0, 255 - 255 * i / CB, 255);

		for (int i = 0; i < BM; ++i, ++k)
			colorWheel[k] = Vec3i(255 * i / BM, 0, 255);

		for (int i = 0; i < MR; ++i, ++k)
			colorWheel[k] = Vec3i(255, 0, 255 - 255 * i / MR);

		first = false;
	}

	const float rad = sqrt(fx * fx + fy * fy);
	const float a = atan2(-fy, -fx) / (float)CV_PI;

	const float fk = (a + 1.0f) / 2.0f * (NCOLS - 1);
	const int k0 = static_cast<int>(fk);
	const int k1 = (k0 + 1) % NCOLS;
	const float f = fk - k0;

	Vec3b pix;

	for (int b = 0; b < 3; b++)
	{
		const float col0 = colorWheel[k0][b] / 255.0f;
		const float col1 = colorWheel[k1][b] / 255.0f;

		float col = (1 - f) * col0 + f * col1;

		if (rad <= 1)
			col = 1 - rad * (1 - col); // increase saturation with radius
		else
			col *= .75; // out of range

		pix[2 - b] = static_cast<uchar>(255.0 * col);
	}

	return pix;
}

static void drawOpticalFlow(const Mat_<float>& flowx, const Mat_<float>& flowy, Mat& dst, float maxmotion = -1)
{
	static bool isFirstTime = true;
	if (isFirstTime)
	{
		dst.create(flowx.size(), CV_8UC3);
		isFirstTime = false;
	}

	dst.setTo(Scalar::all(0));

	// determine motion range:
	float maxrad = maxmotion;

	if (maxmotion <= 0)
	{
		maxrad = 1;
		for (int y = 0; y < flowx.rows; ++y)
		{
			for (int x = 0; x < flowx.cols; ++x)
			{
				Point2f u(flowx(y, x), flowy(y, x));

				if (!isFlowCorrect(u))
					continue;

				maxrad = max(maxrad, sqrt(u.x * u.x + u.y * u.y));
			}
		}
	}

	for (int y = 0; y < flowx.rows; ++y)
	{
		for (int x = 0; x < flowx.cols; ++x)
		{
			Point2f u(flowx(y, x), flowy(y, x));

			if (isFlowCorrect(u))
				dst.at<Vec3b>(y, x) = computeColor(u.x / maxrad, u.y / maxrad);
		}
	}
}

static void showFlow(const char* name, const GpuMat& d_flow)
{
	static GpuMat planes[2];
	cuda::split(d_flow, planes);

	Mat flowx(planes[0]);
	Mat flowy(planes[1]);

	//drawOpticalFlow(flowx, flowy, opticalFlowOut, 10);
	drawOpticalFlow(flowx, flowy, opticalFlowOut);	//-1

	imshow(name, opticalFlowOut);
}

// Brox optical flow
static void optical_flow_Brox(const Mat &frame0, const Mat &frame1)
{
	static GpuMat d_frame0;
	static GpuMat d_frame1;

	d_frame0.upload(frame0);
	d_frame1.upload(frame1);

	static GpuMat d_flow(frame0.size(), CV_32FC2);

	static Ptr<cuda::BroxOpticalFlow> brox = cuda::BroxOpticalFlow::create(0.197f, 50.0f, 0.8f, 10, 77, 10);

	static GpuMat d_frame0f;
	static GpuMat d_frame1f;

	d_frame0.convertTo(d_frame0f, CV_32F, 1.0 / 255.0);
	d_frame1.convertTo(d_frame1f, CV_32F, 1.0 / 255.0);

	//int64 start = getTickCount();

	brox->calc(d_frame0f, d_frame1f, d_flow);

	//double timeSec = (getTickCount() - start) / getTickFrequency();
	//cout << "Brox : " << timeSec << " sec" << endl;

	showFlow("Brox", d_flow);
}

// Dense pyramidal Lucas-Kanade optical flow
static void optical_flow_LK(const Mat &frame0, const Mat &frame1)
{
	static GpuMat d_frame0;
	static GpuMat d_frame1;

	d_frame0.upload(frame0);
	d_frame1.upload(frame1);

	static GpuMat d_flow(frame0.size(), CV_32FC2);

	static Ptr<cuda::DensePyrLKOpticalFlow> lk = cuda::DensePyrLKOpticalFlow::create(Size(7, 7));

	//int64 start = getTickCount();

	lk->calc(d_frame0, d_frame1, d_flow);

	//double timeSec = (getTickCount() - start) / getTickFrequency();
	//cout << "LK : " << timeSec << " sec" << endl;

	showFlow("LK", d_flow);
}

// Farneback optical flow
static void optical_flow_Farn(const Mat &frame0, const Mat &frame1)
{
	static GpuMat d_frame0;
	static GpuMat d_frame1;

	d_frame0.upload(frame0);
	d_frame1.upload(frame1);

	static GpuMat d_flow(frame0.size(), CV_32FC2);

	static Ptr<cuda::FarnebackOpticalFlow> farn = cuda::FarnebackOpticalFlow::create();
	//int64 start = getTickCount();

	farn->calc(d_frame0, d_frame1, d_flow);

	//double timeSec = (getTickCount() - start) / getTickFrequency();
	//cout << "Farn : " << timeSec << " sec" << endl;

	showFlow("Farn", d_flow);
}

// Dual TV-L1 optical flow
static void optical_flow_TVL1(const Mat &frame0, const Mat &frame1)
{
	static GpuMat d_frame0;
	static GpuMat d_frame1;

	d_frame0.upload(frame0);
	d_frame1.upload(frame1);

	static GpuMat d_flow(frame0.size(), CV_32FC2);

	static Ptr<cuda::OpticalFlowDual_TVL1> tvl1 = cuda::OpticalFlowDual_TVL1::create();
	//int64 start = getTickCount();

	tvl1->calc(d_frame0, d_frame1, d_flow);

	//double timeSec = (getTickCount() - start) / getTickFrequency();
	//cout << "TVL1 : " << timeSec << " sec" << endl;

	showFlow("TVL1", d_flow);
}

typedef void(*optical_Flow)(const Mat &, const Mat &);
static optical_Flow optical_Flow_Method[] = {
	optical_flow_Brox,
	optical_flow_LK,
	optical_flow_Farn,
	optical_flow_TVL1,
	NULL
};

const char *optical_Flow_Name[] = { "Brox", "LK", "Farn", "TVL1", NULL };

static int getIndexOfOpticalFlowMethod(const char *methodName)
{
	assert(methodName != NULL);
	for (int i = 0; i < sizeof(optical_Flow_Name) / sizeof(*optical_Flow_Name) - 1; i++)
	{
		if (strcmp(methodName, optical_Flow_Name[i]) == 0)
		{
			return i;
		}
	}

	return -1;
}

// frame0 is the previous frame, frame1 is the current frame; both must be grayscale.
void optical_flow(const Mat &frame0, const Mat &frame1, const char *methodName)
{
	static bool isFirstTime = true;
	if (isFirstTime)
	{
		assert(methodName != NULL && !frame0.empty() && !frame1.empty() && frame0.size() == frame1.size());
		isFirstTime = false;
	}
	
	/*if (methodName == NULL)
	{
		cout << "Please specify a valid optical flow method" << endl;
		return;
	}*/

	static int methodIndex = getIndexOfOpticalFlowMethod(methodName);

	assert(methodIndex > -1);
	/*if (methodIndex == -1)
	{
		cout << "Please specify a valid optical flow method" << endl;
		return;
	}*/

	optical_Flow_Method[methodIndex](frame0, frame1);
}
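
The GPU methods above need the cudaoptflow module. For reference, here is a minimal CPU-only sketch of the same idea using cv::calcOpticalFlowFarneback (from opencv2/video.hpp) with a simple HSV visualization instead of the color wheel; the helper name showFlowCPU is only illustrative and could be called in place of optical_flow() with the same two grayscale frames.

#include <vector>
#include <opencv2/core.hpp>
#include <opencv2/video.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>

// prevGray and currGray must be 8-bit single-channel images of the same size.
void showFlowCPU(const cv::Mat &prevGray, const cv::Mat &currGray)
{
	cv::Mat flow;											// CV_32FC2: per-pixel (dx, dy)
	cv::calcOpticalFlowFarneback(prevGray, currGray, flow,
								 0.5, 3, 15, 3, 5, 1.2, 0);

	cv::Mat xy[2];
	cv::split(flow, xy);

	cv::Mat mag, ang;
	cv::cartToPolar(xy[0], xy[1], mag, ang, true);			// angle in degrees [0, 360)
	cv::normalize(mag, mag, 0, 1, cv::NORM_MINMAX);

	// Encode direction as hue and magnitude as value, then convert to BGR for display.
	std::vector<cv::Mat> hsvPlanes = { ang, cv::Mat::ones(ang.size(), CV_32F), mag };
	cv::Mat hsv, bgr;
	cv::merge(hsvPlanes, hsv);
	cv::cvtColor(hsv, bgr, cv::COLOR_HSV2BGR);
	bgr.convertTo(bgr, CV_8UC3, 255.0);

	cv::imshow("Farneback (CPU)", bgr);
}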

//main.cpp

#include "bgfg_segm.h"
#include "optical_flow.h"
#include <iostream>
#include <cstdio>
#include "opencv2/imgproc.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include <cuda_runtime.h>

int main(int argc, char *argv[])
{
	const char *orignalFrameWin = "original frames";

	//char *fileName = "E:/opencv2.48/sources/samples/gpu/768x576.avi";
	//char *fileName = "D:/FarnebackInGPU/myProject/myProject/data/car/MAH00152.MP4";
	//char *fileName = "D:/FarnebackInGPU/myProject/myProject/data/car/MAH00153.MP4"; 
	//char *fileName = "D:/FarnebackInGPU/myProject/myProject/data/car/20161101050502.MTS";
	//char *fileName = "D:/FarnebackInGPU/myProject/myProject/data/car/MAH00151.MP4";
	char *fileName = "D:/FarnebackInGPU/myProject/myProject/data/car/MAH00158.MP4";
	//const char *fileName = "D:/FarnebackInGPU/myProject/myProject/data/car/MAH00152.MP4";
	//const char *fileName = "D:/FarnebackInGPU/myProject/myProject/data/car/MAH00154.MP4";
	
	Method foregroudExtMethod = MOG;	//FGD_STAT, MOG, MOG2, GMG
	int frames = 0;
	Mat currentFrame, previousFrame;
	Mat currentGrayFrame, previousGrayFrame;
	bool isFirstFrame = true;

	VideoCapture cap;
	cap.open(fileName);
	if (!cap.isOpened())
	{
		fprintf(stderr, "Video can't be opened!\n");
		return 1;
	}

	cudaError_t cudaStatus;
	cudaStatus = cudaSetDevice(0);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
		return 1;
	}

	while (cap.isOpened())
	{
		cap >> currentFrame;
		if (!currentFrame.data)
		{
			fprintf(stderr, "Frame is empty!\n");
			break;
		}

		while (currentFrame.cols > 800)
		{
			resize(currentFrame, currentFrame, Size(currentFrame.cols / 2, currentFrame.rows / 2));
		}

		frames++;
		cv::cvtColor(currentFrame, currentGrayFrame, cv::COLOR_BGR2GRAY);	// VideoCapture frames are BGR
		std::cout << "Frame " << frames << std::endl;
		imshow(orignalFrameWin, currentFrame);

		if (isFirstFrame)
		{
			swap(currentGrayFrame, previousGrayFrame);
			isFirstFrame = false;
		}
		else
		{
			// compute optical flow
			//const char *optical_Flow_Method_Name[] = { "Brox", "LK", "Farn", "TVL1", NULL };
			optical_flow(previousGrayFrame, currentGrayFrame, optical_Flow_Name[0]);
			swap(currentGrayFrame, previousGrayFrame);
		}

		// foreground extraction
		foregroundExtraction(currentFrame, foregroudExtMethod);

		int key = cv::waitKey(1);
		if (key == 27) break;
		else if (key == 'p' || key == 'P') cv::waitKey();
	}
	
	cudaStatus = cudaDeviceReset();
	if (cudaStatus != cudaSuccess)
	{
		fprintf(stderr, "cudaDeviceReset failed!");
		return 1;
	}

	return 0;
}
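
Note: building this demo requires an OpenCV 3.1.0 build that includes the CUDA modules used above (cudabgsegm, cudalegacy, cudaoptflow, cudaarithm) as well as the CUDA toolkit for cudaSetDevice/cudaDeviceReset; prebuilt OpenCV packages usually ship without these modules, so OpenCV normally has to be compiled from source with CUDA support enabled.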

