Main functionality:
Read a video stream from a Hikvision video server, process the stream with OpenCV,
and draw red bounding boxes around the contours of moving objects in the scene.
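Before wiring in the Hikvision SDK, the basic idea can be tried with nothing but OpenCV. The sketch below is an illustration only, not part of the project code; it assumes a local camera at index 0 (swap in a video file or RTSP URL as needed) and uses plain frame differencing plus red boxes, the same technique used in motion_detection.cpp:
#include <opencv2/opencv.hpp>
#include <vector>
int main()
{
    cv::VideoCapture cap(0); // hypothetical source: local camera 0; replace with a file path or RTSP URL
    if (!cap.isOpened())
        return -1;
    cv::Mat frame, gray, prev, diff, mask;
    while (cap.read(frame))
    {
        cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);
        if (prev.empty()) { gray.copyTo(prev); continue; }
        cv::absdiff(prev, gray, diff);                           // frame difference
        cv::threshold(diff, mask, 30, 255, cv::THRESH_BINARY);   // binarize the difference
        cv::medianBlur(mask, mask, 3);                           // remove small noise
        cv::dilate(mask, mask, cv::Mat(), cv::Point(-1, -1), 2); // fill small holes
        std::vector<std::vector<cv::Point> > contours;
        cv::findContours(mask, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
        for (size_t i = 0; i < contours.size(); i++)
        {
            cv::Rect r = cv::boundingRect(contours[i]);
            if (r.width * r.height > 16)                         // same idea as CONTOUR_MAX_AERA
                cv::rectangle(frame, r, cv::Scalar(0, 0, 255), 1); // red box (BGR order)
        }
        gray.copyTo(prev);
        cv::imshow("Motion", frame);
        if (cv::waitKey(1) == 27) break;                         // press Esc to quit
    }
    return 0;
}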
一、Download the Hikvision SDK for your platform
Hikvision official website
二、Set up the environment
1. Install the library files provided by Hikvision into /usr/lib and /lib
Steps:
sudo chmod 777 lib/*
sudo cp lib/* /usr/lib/
sudo cp lib/* /lib/
cd /usr/lib
sudo ldconfig
cd /lib/
sudo ldconfig
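To confirm that the loader can now find the SDK libraries, you can run, for example, ldconfig -p | grep hcnetsdk and check that libhcnetsdk.so shows up in the output.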
三、Code
1.motion_detection.cpp
#include <stdio.h>
#include <iostream>
#include <time.h>
#include <unistd.h>
#include "HCNetSDK.h"
#include "public.h"
#include "LinuxPlayM4.h"
#include <stdlib.h>
#include <string.h>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/optflow/motempl.hpp>
#define USECOLOR 1
using namespace std;
using namespace cv;
//--------------------------------------------
int iPicNum=1;//Set channel NO.
LONG nPort=-1;
HWND hWnd=0;
const double MHI_DURATION = 0.5;
const double MAX_TIME_DELTA = 0.5;
const double MIN_TIME_DELTA = 0.05;
const int N = 3;
//
const int CONTOUR_MAX_AERA = 16;
// ring image buffer
IplImage **buf = 0;
int last = 0;
// temporary images
IplImage *mhi = 0; // MHI: motion history image
//CvFilter filter = CV_GAUSSIAN_5x5;
CvConnectedComp *cur_comp, min_comp;
CvConnectedComp comp;
CvMemStorage *storage;
CvPoint pt[4];
// Parameters:
// img – input video frame
// dst – detection result
void update_mhi( IplImage* img, IplImage* dst, int diff_threshold )
{
double timestamp = clock()/100.; // get current time in seconds
CvSize size = cvSize(img->width,img->height); // get current frame size
int i, j, idx1, idx2;
IplImage* silh;
uchar val;
float temp;
IplImage* pyr = cvCreateImage( cvSize((size.width & -2)/2, (size.height & -2)/2), 8, 1 );
CvMemStorage *stor;
CvSeq *cont, *result, *squares;
CvSeqReader reader;
Mat mat_mhi;
if( !mhi || mhi->width != size.width || mhi->height != size.height )
{
if( buf == 0 )
{
buf = (IplImage**)malloc(N*sizeof(buf[0]));
memset( buf, 0, N*sizeof(buf[0]));
}
for( i = 0; i < N; i++ )
{
cvReleaseImage( &buf[i] );
buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
cvZero( buf[i] );
}
cvReleaseImage( &mhi );
mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );
cvZero( mhi ); // clear MHI at the beginning
} // end of if(mhi)
cvCvtColor( img, buf[last], CV_BGR2GRAY ); // convert frame to grayscale
idx1 = last;
idx2 = (last + 1) % N; // index of (last - (N-1))th frame
last = idx2;
// compute the frame difference
silh = buf[idx2];
cvAbsDiff( buf[idx1], buf[idx2], silh ); // get difference between frames
// binarize the difference image
cvThreshold( silh, silh, 30, 255, CV_THRESH_BINARY ); // and threshold it
Mat mat_silh=cvarrToMat(silh);
mat_mhi=cvarrToMat(mhi); // wrap the existing MHI buffer so the update happens in place
cv::motempl::updateMotionHistory( mat_silh, mat_mhi, timestamp, MHI_DURATION ); // update MHI
*mhi=IplImage(mat_mhi);
cvCvtScale( mhi, dst, 255./MHI_DURATION,
(MHI_DURATION - timestamp)*255./MHI_DURATION );
cvCvtScale( mhi, dst, 255./MHI_DURATION, 0 );
// median filter to remove small noise
cvSmooth( dst, dst, CV_MEDIAN, 3, 0, 0, 0 );
// downsample to suppress noise
cvPyrDown( dst, pyr, 7 );
cvDilate( pyr, pyr, 0, 1 ); // dilate to fill small holes in the moving regions
cvPyrUp( pyr, dst, 7 );
//
// the code below finds the contours
//
// Create dynamic structure and sequence.
stor = cvCreateMemStorage(0);
cont = cvCreateSeq(CV_SEQ_ELTYPE_POINT, sizeof(CvSeq), sizeof(CvPoint) , stor);
// find all contours
cvFindContours( dst, stor, &cont, sizeof(CvContour),
CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));
// draw each contour using the bounding rectangle stored in it
for(;cont;cont = cont->h_next)
{
CvRect r = ((CvContour*)cont)->rect;
if(r.height * r.width > CONTOUR_MAX_AERA) // discard rectangles whose area is too small
{
cvRectangle( img, cvPoint(r.x,r.y),
cvPoint(r.x + r.width, r.y + r.height),
CV_RGB(255,0,0), 1, CV_AA,0);
}
}
// free memory
cvReleaseMemStorage(&stor);
cvReleaseImage( &pyr );
}
//convert video format from YV12 to packed YUV
void yv12toYUV(char *outYuv, char *inYv12, int width, int height,int widthStep)
{
int col,row;
unsigned int Y,U,V;
int tmp;
int idx;
for (row=0; row<height; row++)
{
idx = row*widthStep;
int rowptr = row*width;
for (col=0; col<width; col++)
{
//int colhalf=col>>1;
tmp = (row/2)*(width/2)+(col/2);
Y=(unsigned int) inYv12[row*width+col];
U=(unsigned int) inYv12[width*height+width*height/4+tmp];
V=(unsigned int) inYv12[width*height+tmp];
outYuv[idx+col*3] = Y;
outYuv[idx+col*3+1] = U;
outYuv[idx+col*3+2] = V;
}
}
}
//Decode callback: video arrives as YUV data (YV12), audio as PCM data
void CALLBACK DecCBFun(int nPort,char * pBuf,int nSize,FRAME_INFO * pFrameInfo, int nReserved1,int nReserved2)
{
long lFrameType = pFrameInfo->nType;
if(lFrameType ==T_YV12) //YV12
{
IplImage* pImgYCrCb = cvCreateImage(cvSize(pFrameInfo->nWidth,pFrameInfo->nHeight), IPL_DEPTH_8U, 3);//image that receives the YCrCb data
yv12toYUV(pImgYCrCb->imageData, pBuf, pFrameInfo->nWidth,pFrameInfo->nHeight,pImgYCrCb->widthStep);//convert the YV12 buffer to packed YCrCb
//allocate working images
IplImage* pImg = cvCreateImage(cvSize(pFrameInfo->nWidth,pFrameInfo->nHeight), IPL_DEPTH_8U, 3);
IplImage* motion = cvCreateImage(cvSize(pFrameInfo->nWidth,pFrameInfo->nHeight), IPL_DEPTH_8U, 1);
cvCvtColor(pImgYCrCb,pImg,CV_YCrCb2RGB);
update_mhi( pImg, motion, 60 );//detect motion
cvShowImage( "Motion", pImg );
cvWaitKey(1);
cvReleaseImage(&pImgYCrCb);
cvReleaseImage(&pImg);
cvReleaseImage(&motion); // also release the motion buffer to avoid a per-frame leak
}
if(lFrameType ==T_AUDIO16)
{
//PCM
}
}
///Real-time stream callback
void CALLBACK fRealDataCallBack(LONG lRealHandle,DWORD dwDataType,BYTE *pBuffer,DWORD dwBufSize,void *pUser)
{
DWORD dRet;
switch (dwDataType)
{
case NET_DVR_SYSHEAD: //system header
if (!PlayM4_GetPort(&nPort)) //get an unused port from the playback library
{
break;
}
if(dwBufSize > 0)
{
if (!PlayM4_OpenStream(nPort,pBuffer,dwBufSize,1024*1024))
{
dRet=PlayM4_GetLastError(nPort);
break;
}
//set the decode callback: decode only, do not display
if (!PlayM4_SetDecCallBack(nPort,DecCBFun))
{
dRet=PlayM4_GetLastError(nPort);
break;
}
//start video decoding
if (!PlayM4_Play(nPort,hWnd))
{
dRet=PlayM4_GetLastError(nPort);
break;
}
//start audio decoding; requires a composite (video + audio) stream
if (!PlayM4_PlaySoundShare(nPort))
{
dRet=PlayM4_GetLastError(nPort);
break;
}
}
break;
case NET_DVR_STREAMDATA: //stream data (composite stream: video + audio)
if (dwBufSize > 0 && nPort != -1)
{
BOOL inData=PlayM4_InputData(nPort,pBuffer,dwBufSize);
while (!inData)
{
waitKey(0);
inData=PlayM4_InputData(nPort,pBuffer,dwBufSize);
printf("PlayM4_InputData failed \n");
}
}
break;
}
}
void CALLBACK g_ExceptionCallBack(DWORD dwType, LONG lUserID, LONG lHandle, void *pUser)
{
char tempbuf[256] = {0};
switch(dwType)
{
case EXCEPTION_RECONNECT: //reconnecting during preview
printf("----------reconnect--------%ld\n", time(NULL));
break;
default:
break;
}
}
int main(int argc,char ** argv) {
//---------------------------------------
// Initialization
NET_DVR_Init();
//set connection timeout and reconnection interval
NET_DVR_SetConnectTime(2000, 1);
NET_DVR_SetReconnect(10000, true);
// Log in to the device
LONG lUserID;
NET_DVR_DEVICEINFO_V30 struDeviceInfo;
lUserID = NET_DVR_Login_V30("192.168.1.111", 8000, "admin", "12345", &struDeviceInfo);
if (lUserID < 0)
{
printf("Login error, %d\n", NET_DVR_GetLastError());
NET_DVR_Cleanup();
return HPR_ERROR;
} else
printf("Login success!\n");
//---------------------------------------
//set the exception message callback
NET_DVR_SetExceptionCallBack_V30(0, NULL,g_ExceptionCallBack, NULL);
//---------------------------------------
//start live preview and set the stream data callback
NET_DVR_CLIENTINFO ClientInfo;
ClientInfo.lChannel = 1; //device channel number
ClientInfo.hPlayWnd = hWnd; //window handle is NULL: the SDK only fetches the stream, it does not decode
ClientInfo.lLinkMode = 0; //Main Stream
ClientInfo.sMultiCastIP = NULL;
LONG lRealPlayHandle;
lRealPlayHandle = NET_DVR_RealPlay_V30(lUserID,&ClientInfo,fRealDataCallBack,NULL,FALSE);
if (lRealPlayHandle<0)
{
printf("pyd1---NET_DVR_RealPlay_V30 error %d\n", NET_DVR_GetLastError());
return HPR_ERROR;
}
cvNamedWindow("Motion",CV_WINDOW_NORMAL);//resizable window
sleep(-1);//block the main thread; frames are processed in the callbacks
//---------------------------------------
//stop the preview
cvDestroyWindow( "Motion" );
if(!NET_DVR_StopRealPlay(lRealPlayHandle))
{
printf("NET_DVR_StopRealPlay error! Error number: %d\n", NET_DVR_GetLastError());
return HPR_ERROR;
}
printf("close preview\n");
//log out the user
NET_DVR_Logout(lUserID);
NET_DVR_Cleanup();
return 0;
}
2.CMakeLists.txt
cmake_minimum_required(VERSION 2.8.3)
project(motion_detection)
if(COMMAND cmake_policy)
cmake_policy(SET CMP0003 NEW)
endif(COMMAND cmake_policy)
find_package(catkin REQUIRED COMPONENTS
cv_bridge
image_transport
roscpp
sensor_msgs
)
find_package( OpenCV REQUIRED )
catkin_package(
INCLUDE_DIRS include
LIBRARIES motion_detection
CATKIN_DEPENDS cv_bridge image_transport roscpp sensor_msgs
# DEPENDS system_lib
)
include_directories(
${catkin_INCLUDE_DIRS}
include
)
add_executable(motion_detection src/motion_detection.cpp)
target_link_libraries(motion_detection
${catkin_LIBRARIES}
${OpenCV_LIBS}
libPlayCtrl.so
libAudioRender.so
libSuperRender.so
libhcnetsdk.so
)
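Note that libPlayCtrl.so, libAudioRender.so, libSuperRender.so and libhcnetsdk.so are linked by name only, so the linker must be able to find them; this works because they were copied to /usr/lib in step 二 above. Also note that this CMakeLists.txt declares catkin (ROS) dependencies, so it is meant to be built in an environment where catkin, cv_bridge, image_transport, roscpp and sensor_msgs are installed.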
3. Build
mkdir build
cd build
cmake ..
make
4. Run
./motion_detection
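Before running, change the device IP, port, username and password passed to NET_DVR_Login_V30 in motion_detection.cpp to match your own camera. If the login and preview succeed, a window named "Motion" opens and shows the live stream with red rectangles drawn around moving objects.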