Following up on the previous post: I have recently been studying C++ matrix libraries, and took the opportunity to reimplement the ViBe algorithm with the Armadillo matrix library. The result was not ideal, but along the way I got a reasonable working knowledge of several well-known matrix libraries (Armadillo, Eigen, NumCpp), which was worthwhile. Looking at the outcome: even with parallel processing enabled in the Armadillo version (#define ARMA_USE_OPENMP), the parallelism was disappointing: each frame took over 150 ms to process and CPU utilization was only around 44%. By contrast, the most basic raw for-loop version, once parallelized with OpenMP, processes a frame in under 20 ms with good results and roughly 88% CPU utilization. The main takeaway from this experiment was learning the core functionality of C++ matrix libraries.
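For comparison, this is roughly what the "plain for loop + OpenMP" variant mentioned above looks like for the per-pixel matching (segmentation) step. It is only a minimal sketch using the same default parameters; segmentFrame and its arguments are illustrative names of mine, not part of the Armadillo implementation that follows.
#include <cstdlib>
#include <vector>
#include "opencv2/opencv.hpp"
static const int NUM_SAMPLES = 20;   // samples kept per pixel
static const int MIN_MATCHES = 2;    // #min
static const int RADIUS      = 20;   // matching radius
// samples holds NUM_SAMPLES background sample images, gray is the 8-bit input frame,
// fgMask receives the binary foreground mask (255 = foreground, 0 = background).
void segmentFrame(const std::vector<cv::Mat>& samples, const cv::Mat& gray, cv::Mat& fgMask)
{
    fgMask.create(gray.size(), CV_8UC1);
    // Rows are independent, so the outer loop parallelizes cleanly with OpenMP.
    #pragma omp parallel for
    for (int r = 0; r < gray.rows; r++)
    {
        for (int c = 0; c < gray.cols; c++)
        {
            int matches = 0;
            int pix = gray.at<uchar>(r, c);
            // Count the background samples that lie within RADIUS of the current value.
            for (int k = 0; k < NUM_SAMPLES && matches < MIN_MATCHES; k++)
                if (std::abs(pix - (int)samples[k].at<uchar>(r, c)) < RADIUS)
                    matches++;
            fgMask.at<uchar>(r, c) = (matches >= MIN_MATCHES) ? 0 : 255;
        }
    }
}
Compile with -fopenmp (or your compiler's equivalent) so the pragma actually takes effect.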
Here is the code.
vibe.h
#ifndef VIBE_H
#define VIBE_H
#include <iostream>
// ARMA_USE_OPENMP must be defined before <armadillo> is included, otherwise it has no effect
#define ARMA_USE_OPENMP
#include <armadillo>
#include "opencv2/opencv.hpp"
using namespace std;
using namespace arma;
// Default number of samples kept per pixel
#define DEFAULT_NUM_SAMPLES 20
// Default #min: number of matches required to label a pixel as background
#define DEFAULT_MIN_MATCHES 2
// Default matching radius (sphere radius) around the pixel value
#define DEFAULT_RADIUS 20
// Default subsampling factor (probability 1/φ of a random update)
#define DEFAULT_RANDOM_SAMPLE 5
// Maximum number of consecutive frames a pixel may be detected as foreground
#define DEFAULT_MAXFGCOUNT 50
class ViBe
{
public:
ViBe(int num_sam = DEFAULT_NUM_SAMPLES,
int min_match = DEFAULT_MIN_MATCHES,
int r = DEFAULT_RADIUS,
int rand_sam = DEFAULT_RANDOM_SAMPLE,
int max_fg_count=DEFAULT_MAXFGCOUNT);
~ViBe();
// Convert cv::Mat data into Armadillo's matrix format
// Note: the arma matrix must already be allocated by the caller
void cvmat2armamat(const cv::Mat& cv_mat_in, arma::umat& arma_mat_out);
// My own implementation of the reverse conversion
void armamat2cvmat(const arma::umat& arma_mat_in, cv::Mat& cv_mat_out);
// Convert an Armadillo matrix into OpenCV cv::Mat format
template <typename T>
void Arma_mat_to_cv_mat(const arma::Mat<T>& arma_mat_in, cv::Mat_<T>& cv_mat_out);
// Initialize the background model
//void init(cv::Mat img);
// Process the first frame of the video sequence
void ProcessFirstFrame(cv::Mat img);
// Run the ViBe algorithm: extract foreground areas and update the background model sample library
void Run(cv::Mat img);
// Get the foreground binary image
cv::Mat getFGModel();
// Delete the sample library
//void deleteSamples();
// x offsets of a pixel's neighbourhood
int c_xoff[9];
// y offsets of a pixel's neighbourhood
int c_yoff[9];
private:
// Sample library, size = img.rows * img.cols * DEFAULT_NUM_SAMPLES
ucube samples;
// Number of consecutive frames each pixel has been classified as foreground
umat FGCount;
// Foreground binary image
cv::Mat FGModel;
// Number of samples per pixel
int num_samples;
// #min: number of matches required to label a pixel as background
int num_min_matches;
// Matching radius around the pixel value
int radius;
// Subsampling factor of the random update
int random_sample;
// Maximum number of consecutive frames a pixel may be detected as foreground
int max_FG_count;
// Given a vector of linear pixel indices, randomly pick a position inside each pixel's
// 3x3 neighbourhood and return the neighbours' linear indices as a vector
uvec getNeigbourInd(uvec& vecSelfInd, const int height, const int width);
};
#endif // VIBE_H
vibe.cpp
#include "ViBe.h"
#include <iostream>
/*===================================================================
 * Constructor: ViBe
 *
 * Summary:
 *   Initialize the main parameters of the ViBe algorithm.
 *
 * Arguments:
 *   int num_sam      - number of samples per pixel
 *   int min_match    - #min, matches required to label a pixel as background
 *   int r            - matching radius around the pixel value
 *   int rand_sam     - subsampling factor of the random update
 *   int max_fg_count - maximum consecutive foreground detections per pixel
 =====================================================================
 */
ViBe::ViBe(int num_sam, int min_match, int r, int rand_sam, int max_fg_count)
{
    num_samples = num_sam;
    num_min_matches = min_match;
    radius = r;
    random_sample = rand_sam;
    max_FG_count = max_fg_count;
    int c_off[9] = {-1, 0, 1, -1, 1, -1, 0, 1, 0};
    for(int i = 0; i < 9; i++)
        c_xoff[i] = c_yoff[i] = c_off[i];
}
/*===================================================================
 * Destructor: ~ViBe
 *
 * Summary:
 *   Release the memory of the sample library.
 =====================================================================
 */
ViBe::~ViBe(void)
{
    //deleteSamples();
}
// Convert cv::Mat data into Armadillo's matrix format
void ViBe::cvmat2armamat(const cv::Mat& cv_mat_in, arma::umat& arma_mat_out)
{
    // copy an 8-bit single-channel cv::Mat into a pre-allocated arma::umat, element by element
    #pragma omp parallel for
    for (int r = 0; r < cv_mat_in.rows; r++)
        for (int c = 0; c < cv_mat_in.cols; c++)
            arma_mat_out(r, c) = cv_mat_in.at<uchar>(r, c);
}
// My own implementation of the reverse conversion
void ViBe::armamat2cvmat(const umat& arma_mat_in, cv::Mat& cv_mat_out)
{
    #pragma omp parallel for
    for (int r = 0; r < (int)arma_mat_in.n_rows; r++)
        for (int c = 0; c < (int)arma_mat_in.n_cols; c++)
            cv_mat_out.at<uchar>(r, c) = (uchar)arma_mat_in(r, c);
}
/*===================================================================
 * Function: getNeigbourInd
 * Summary: Given a vector of linear pixel indices, pick a random position in each
 *          pixel's 3x3 neighbourhood and return the neighbours' linear indices.
 * Arguments: uvec vecSelfInd - linear indices of the pixels themselves
 *            (use sub2ind first if you have separate row/column coordinates)
 * Returns: vector of linear indices of the randomly chosen neighbours
 =====================================================================
 */
uvec ViBe::getNeigbourInd(uvec& vecSelfInd, const int height, const int width)
{
    // Random row/column offset matrix, each entry in {-1, 0, 1}
    imat matoffsetInd = randi<imat>(2, vecSelfInd.n_elem, distr_param(-1, 1));
    // Convert the pixels' linear indices into (row, column) subscripts
    umat matSelfInd = ind2sub(size(height, width), vecSelfInd);
    // Add the offsets to the pixels' own coordinates to get the neighbour coordinates
    imat matNeighbourRaw = conv_to<imat>::from(matSelfInd) + matoffsetInd;
    // Clamp out-of-range coordinates: negative indices become 0,
    // rows >= height become height-1, columns >= width become width-1
    matNeighbourRaw(find(matNeighbourRaw < 0)).zeros();
    uvec vecNeighbourRow = conv_to<uvec>::from(matNeighbourRaw.row(0).t());
    uvec vecNeighbourCol = conv_to<uvec>::from(matNeighbourRaw.row(1).t());
    vecNeighbourRow(find(vecNeighbourRow >= (uword)height)).fill(height - 1);
    vecNeighbourCol(find(vecNeighbourCol >= (uword)width)).fill(width - 1);
    // Reassemble the clamped row/column vectors into a 2 x N subscript matrix
    umat matNeighbourInd = join_vert(vecNeighbourRow.t(), vecNeighbourCol.t());
    // Convert the subscripts back into linear indices and return them
    return sub2ind(size(height, width), matNeighbourInd);
}
/*===================================================================
 * Function: ProcessFirstFrame
 *
 * Summary:
 *   Process the first frame of the video sequence: for every pixel, randomly select
 *   pixels from its neighbourhood to fill the sample library and initialize the
 *   background model.
 *
 * Arguments:
 *   Mat img - source image
 *
 * Returns:
 *   void
 =====================================================================
 */
void ViBe::ProcessFirstFrame(cv::Mat img)
{
    int height = img.rows;
    int width = img.cols;
    // Convert img into an Armadillo matrix
    umat armaFrame(height, width);
    cvmat2armamat(img, armaFrame);
    samples.zeros(height, width, num_samples);
    // Build the subscript matrix (2 x (height*width)):
    // the first row holds the row indices 0..height-1, tiled width times;
    // the second row holds the column indices 0..width-1, each repeated height times.
    uvec ind_row = repmat(regspace<uvec>(0, height - 1), width, 1);
    uvec ind_col = zeros<uvec>(height * width);
    for (int i = 0; i < width; i++)
        ind_col.subvec(i * height, (i + 1) * height - 1).fill(i);
    uvec indv_img = sub2ind(size(armaFrame), join_vert(ind_row.t(), ind_col.t()));
    // Fill every slice of the sample library with the values of a randomly chosen
    // neighbour of each pixel (each_slice with OpenMP enabled)
    samples.each_slice([&indv_img, &armaFrame, height, width, this](umat& eachslice)
    {
        uvec vecNeigbourInd = this->getNeigbourInd(indv_img, height, width);
        eachslice(indv_img) = armaFrame(vecNeigbourInd);
    }, true);
    // Initialize the foreground model and the foreground counter
    FGModel = cv::Mat::zeros(img.size(), CV_8UC1);
    FGCount.zeros(height, width);
}
/*===================================================================
 * Function: Run
 *
 * Summary:
 *   Run the ViBe algorithm: extract foreground areas and update the background
 *   model sample library.
 *
 * Arguments:
 *   Mat img - source image
 *
 * Returns:
 *   void
 =====================================================================
 */
void ViBe::Run(cv::Mat img)
{
    int height = img.rows;
    int width = img.cols;
    umat armaFrame(height, width);
    cvmat2armamat(img, armaFrame);
    // For every pixel, count how many samples lie within radius of the current value
    umat matMatched = zeros<umat>(height, width);
    int nr_ = radius;
    samples.each_slice([&matMatched, nr_, &armaFrame](umat& m)
    {
        uvec vecDistInd = find(abs(conv_to<imat>::from(m) - conv_to<imat>::from(armaFrame)) < nr_);
        matMatched(vecDistInd) += 1;
    }, true);
    // Binarize: pixels with fewer than num_min_matches matching samples are foreground (1),
    // the rest are background (0)
    matMatched = conv_to<umat>::from(matMatched < (uword)num_min_matches);
    FGCount.elem(find(matMatched == 0)).zeros();                  // background pixel: reset its foreground counter
    FGCount.elem(find(matMatched == 1)) += 1;                     // foreground pixel: increase its counter
    FGCount.elem(find(FGCount >= (uword)max_FG_count)).zeros();   // detected as foreground max_FG_count times in a row: force it back to background
    matMatched.elem(find(FGCount == 0)).zeros();                  // also reset those pixels in the segmentation mask
    uvec vecBGPixInd = find(matMatched == 0);                     // linear indices of all background pixels
    // Each background pixel updates its own sample set with probability 1/φ
    uvec vecProbBGUpdateSample = randi<uvec>(vecBGPixInd.n_elem, distr_param(0, random_sample - 1));
    uvec vecBGUdSamplesInd = vecBGPixInd.elem(find(vecProbBGUpdateSample == 0)); // background pixels selected for the update
    umat matBGUdInd = ind2sub(size(matMatched), vecBGUdSamplesInd);              // convert linear indices into (row, col) subscripts
    uvec vecSliceBg = randi<uvec>(vecBGUdSamplesInd.n_elem, distr_param(0, num_samples - 1)); // random sample slice to overwrite
    umat matBGSamInd = join_vert(matBGUdInd, vecSliceBg.t());
    uvec vecBGSamInd = sub2ind(size(samples), matBGSamInd);
    samples.elem(vecBGSamInd) = armaFrame(vecBGUdSamplesInd);     // overwrite the chosen sample with the current pixel value
    // Each background pixel also updates a random neighbour's sample set with probability 1/φ
    uvec vecProbNBUpdateSample = randi<uvec>(vecBGPixInd.n_elem, distr_param(0, random_sample - 1));
    uvec vecBgSelfInd = vecBGPixInd.elem(find(vecProbNBUpdateSample == 0));      // background pixels whose neighbours will be updated
    uvec vecBgNeibInd = getNeigbourInd(vecBgSelfInd, height, width);             // random neighbour of each selected pixel
    umat matBgNeibInd = ind2sub(size(matMatched), vecBgNeibInd);
    vecSliceBg = randi<uvec>(vecBgNeibInd.n_elem, distr_param(0, num_samples - 1));
    matBGSamInd = join_vert(matBgNeibInd, vecSliceBg.t());
    vecBGSamInd = sub2ind(size(samples), matBGSamInd);
    samples.elem(vecBGSamInd) = armaFrame(vecBgSelfInd);
    // Finally set foreground pixels to 255 and write the mask back to FGModel
    matMatched(find(matMatched == 1)).fill(255);
    armamat2cvmat(matMatched, FGModel);
}
/*===================================================================
 * Function: getFGModel
 *
 * Summary:
 *   Get the foreground binary image.
 *
 * Returns:
 *   Mat
 =====================================================================
 */
cv::Mat ViBe::getFGModel()
{
    return FGModel;
}
/*===================================================================
 * Function: deleteSamples
 *
 * Summary:
 *   Delete the sample library.
 *
 * Returns:
 *   void
 =====================================================================
void ViBe::deleteSamples()
{
    //delete samples;
}
*/
main.cpp
#include "ViBe.h"
#include <iostream>
#include <string>
#include "opencv2/opencv.hpp"
using namespace std;
int main(int argc, char* argv[])
{
    string videofile;
    if (argc < 2)
    {
        videofile = "/home/vernon/projects/opencv/BackgroundSeg/Video/Video.avi";
    }
    else
        videofile = argv[1];
    cv::Mat frame, gray, FGModel;
    cv::VideoCapture capture(videofile);
    if (!capture.isOpened())
    {
        cout << "ERROR: Didn't find this video!" << endl;
        return -1;
    }
    // Variables for timing the program
    double time;
    double start;
    ViBe vibe;
    int count = 0;
    while (1)
    {
        capture >> frame;
        if (frame.empty())
            break;                                   // stop when the video ends
        cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);
        if (count == 0)
        {
            //vibe.init(gray);
            vibe.ProcessFirstFrame(gray);
            cout << "Training ViBe Success." << endl;
        }
        else
        {
            start = static_cast<double>(cv::getTickCount());
            vibe.Run(gray);
            time = ((double)cv::getTickCount() - start) / cv::getTickFrequency() * 1000;
            cout << "Process " << count << "th frame, time: " << time << " ms" << endl;
            FGModel = vibe.getFGModel();
            // morphologyEx(FGModel, FGModel, MORPH_OPEN, Mat());
            cv::imshow("ViBe result", FGModel);
        }
        count++;
        cv::imshow("Original video", frame);
        if (cv::waitKey(25) == 27)
            return 0;
    }
    return 0;
}