Determine whether there is motion in a scene by computing the percentage of pixels that changed between two frames.
The key function here is AbsDiff, which finds the pixels that differ between the two frames. The images do need some preprocessing first, such as smoothing, grayscale conversion, and binarization; the points in the resulting binary image are much more reliable.
Code example:
import cv2.cv as cv

capture = cv.CaptureFromCAM(0)

frame1 = cv.QueryFrame(capture)
frame1gray = cv.CreateMat(frame1.height, frame1.width, cv.CV_8U)
cv.CvtColor(frame1, frame1gray, cv.CV_RGB2GRAY)

res = cv.CreateMat(frame1.height, frame1.width, cv.CV_8U)
frame2gray = cv.CreateMat(frame1.height, frame1.width, cv.CV_8U)

w = frame2gray.width
h = frame2gray.height
nb_pixels = frame2gray.width * frame2gray.height

while True:
    frame2 = cv.QueryFrame(capture)
    cv.CvtColor(frame2, frame2gray, cv.CV_RGB2GRAY)

    cv.AbsDiff(frame1gray, frame2gray, res)   # pixel-wise difference between the two frames
    cv.ShowImage("After AbsDiff", res)

    cv.Smooth(res, res, cv.CV_BLUR, 5, 5)     # smooth to suppress noise
    element = cv.CreateStructuringElementEx(5*2+1, 5*2+1, 5, 5, cv.CV_SHAPE_RECT)
    cv.MorphologyEx(res, res, None, element, cv.CV_MOP_OPEN)    # remove small speckles
    cv.MorphologyEx(res, res, None, element, cv.CV_MOP_CLOSE)   # fill small holes
    cv.Threshold(res, res, 10, 255, cv.CV_THRESH_BINARY_INV)    # binarize: changed pixels become 0

    cv.ShowImage("Image", frame2)
    cv.ShowImage("Res", res)

    # Count the changed pixels (value 0 after the inverted threshold)
    nb = 0
    for y in range(h):
        for x in range(w):
            if res[y, x] == 0.0:
                nb += 1
    avg = (nb * 100.0) / nb_pixels
    # print "Average: ", avg, "%\r",
    if avg >= 5:
        print "Something is moving !"

    cv.Copy(frame2gray, frame1gray)   # current frame becomes the reference for the next iteration
    c = cv.WaitKey(1)
    if c == 27:   # break if the user presses 'Esc'
        break
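The cv2.cv module used above only exists in the old OpenCV 2.x Python bindings. If you are on a newer OpenCV, a rough equivalent of the same frame-differencing idea written against the cv2/NumPy API might look like the sketch below; the 5% motion threshold and the 10/255 binarization are carried over from the code above, everything else (device index, kernel size, window name) is an assumption, not the original author's code:

# A minimal cv2/NumPy sketch of the same frame-differencing idea (assumption:
# a webcam reachable as device 0; this is not the original cv2.cv code above).
import cv2

cap = cv2.VideoCapture(0)
ok, frame1 = cap.read()
prev_gray = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)

while True:
    ok, frame2 = cap.read()
    if not ok:
        break
    gray = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)

    res = cv2.absdiff(prev_gray, gray)        # pixel-wise difference
    res = cv2.blur(res, (5, 5))               # smooth to suppress noise
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (11, 11))
    res = cv2.morphologyEx(res, cv2.MORPH_OPEN, kernel)
    res = cv2.morphologyEx(res, cv2.MORPH_CLOSE, kernel)
    # non-inverted threshold: changed pixels become 255 (the original counted zeros instead)
    _, res = cv2.threshold(res, 10, 255, cv2.THRESH_BINARY)

    # percentage of changed pixels
    avg = cv2.countNonZero(res) * 100.0 / res.size
    if avg >= 5:
        print("Something is moving !")

    cv2.imshow("Res", res)
    prev_gray = gray
    if cv2.waitKey(1) == 27:   # Esc
        break

cap.release()
cv2.destroyAllWindows()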
Background modeling is another way to detect moving objects. Here is a code example:
import cv2.cv as cv

capture = cv.CaptureFromCAM(0)
width = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH))
height = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT))

gray = cv.CreateImage((width, height), cv.IPL_DEPTH_8U, 1)
background = cv.CreateMat(height, width, cv.CV_32F)
backImage = cv.CreateImage((width, height), cv.IPL_DEPTH_8U, 1)
foreground = cv.CreateImage((width, height), cv.IPL_DEPTH_8U, 1)
output = cv.CreateImage((width, height), 8, 1)

begin = True
threshold = 10

while True:
    frame = cv.QueryFrame(capture)
    cv.CvtColor(frame, gray, cv.CV_BGR2GRAY)

    if begin:
        cv.Convert(gray, background)   # use the first frame as the initial background model
        begin = False

    cv.Convert(background, backImage)         # convert the float background to 8-bit for comparison
    cv.AbsDiff(backImage, gray, foreground)   # difference between current frame and background
    # inverted binarization: 255 where the difference is below the threshold (static pixels)
    cv.Threshold(foreground, output, threshold, 255, cv.CV_THRESH_BINARY_INV)
    cv.Acc(foreground, background, output)    # accumulate the difference into the background, masked by output

    cv.ShowImage("Output", output)
    cv.ShowImage("Gray", gray)

    c = cv.WaitKey(1)
    if c == 27:   # break if the user presses 'Esc'
        break
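For the same background-modelling behaviour on a current OpenCV, a running-average model is the usual replacement. The sketch below uses cv2.accumulateWeighted instead of the legacy cv.Acc bookkeeping; the 0.05 learning rate and the threshold of 10 are assumptions, not values taken from the original:

# Running-average background model with the cv2 API (a sketch of the same idea,
# not the original cv.Acc code; learning rate and threshold are assumptions).
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
ok, frame = cap.read()
background = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY).astype(np.float32)

while True:
    ok, frame = cap.read()
    if not ok:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # blend the current frame into the background model (small alpha = slow adaptation)
    cv2.accumulateWeighted(gray, background, 0.05)
    backImage = cv2.convertScaleAbs(background)   # back to 8-bit for comparison

    foreground = cv2.absdiff(backImage, gray)     # pixels that differ from the model
    _, output = cv2.threshold(foreground, 10, 255, cv2.THRESH_BINARY)

    cv2.imshow("Output", output)
    cv2.imshow("Gray", gray)
    if cv2.waitKey(1) == 27:   # Esc
        break

cap.release()
cv2.destroyAllWindows()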
I tried all of the approaches above, and they can basically identify moving objects, but there were always small flaws. So I compared a few other people's solutions and combined them into a scheme of my own.
The processing steps:
Run absdiff on two frames to get a new image.
Convert the new image to grayscale and binarize it.
Use findContours to get the contours in the binarized image.
Use contourArea() to filter out contours whose area falls outside the range you care about.
This approach can basically detect the movement of objects in the image, and I think that by setting the filter range of contourArea(), you can detect moving objects at different distances from the camera (see the sketch after the code below).
Here is the code example:
#!/usr/bin/env python
# coding=utf-8
import cv2
import numpy as np

camera = cv2.VideoCapture(0)
width = int(camera.get(3))    # CAP_PROP_FRAME_WIDTH
height = int(camera.get(4))   # CAP_PROP_FRAME_HEIGHT
firstFrame = None

while True:
    (grabbed, frame) = camera.read()
    if not grabbed:
        break

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    if firstFrame is None:
        firstFrame = gray
        continue

    frameDelta = cv2.absdiff(firstFrame, gray)
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
    # thresh = cv2.adaptiveThreshold(frameDelta, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
    #                                cv2.THRESH_BINARY, 11, 2)
    # thresh = cv2.adaptiveThreshold(frameDelta, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
    #                                cv2.THRESH_BINARY, 11, 2)
    thresh = cv2.dilate(thresh, None, iterations=2)   # enlarge white regions to join fragments

    # findContours returns 3 values on OpenCV 3.x and 2 on 4.x; take the last two either way
    cnts, _ = cv2.findContours(thresh.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)[-2:]
    for c in cnts:
        if cv2.contourArea(c) < 10000:   # ignore contours that are too small
            continue
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    cv2.imshow("Security Feed", frame)
    firstFrame = gray.copy()

    if cv2.waitKey(1) == 27:   # break if the user presses 'Esc'
        break

camera.release()
cv2.destroyAllWindows()
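On the contourArea() point mentioned above: bounding the area from both sides is one way to restrict detection to objects whose apparent size, and therefore rough distance from the camera, falls in a band. A minimal sketch with made-up bounds; MIN_AREA, MAX_AREA and the helper name are assumptions you would tune and rename for your own camera and scene:

import cv2

# Hypothetical bounds; tune for your camera, resolution and target distance.
MIN_AREA = 2000     # below this: too small / too far away / probably noise
MAX_AREA = 50000    # above this: too close / likely a global lighting change

def boxes_in_area_band(contours, min_area=MIN_AREA, max_area=MAX_AREA):
    """Return bounding rects only for contours whose area lies in [min_area, max_area]."""
    boxes = []
    for c in contours:
        area = cv2.contourArea(c)
        if min_area <= area <= max_area:
            boxes.append(cv2.boundingRect(c))
    return boxes

# Inside the loop above, instead of the fixed `< 10000` check:
#     for (x, y, w, h) in boxes_in_area_band(cnts):
#         cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)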