The full code is here!!!
import cv2
import numpy as np

def max_filtering(N, I_temp):
    # pad the image with -1 so that border pixels can be recognized and skipped below
    wall = np.full((I_temp.shape[0]+(N//2)*2, I_temp.shape[1]+(N//2)*2), -1)
    wall[(N//2):wall.shape[0]-(N//2), (N//2):wall.shape[1]-(N//2)] = I_temp.copy()
    temp = np.full((I_temp.shape[0]+(N//2)*2, I_temp.shape[1]+(N//2)*2), -1)
    for y in range(0, wall.shape[0]):
        for x in range(0, wall.shape[1]):
            if wall[y,x] != -1:
                # take the N x N window centred on (y, x) and keep its maximum
                window = wall[y-(N//2):y+(N//2)+1, x-(N//2):x+(N//2)+1]
                num = np.amax(window)
                temp[y,x] = num
    # crop the padding back off
    A = temp[(N//2):wall.shape[0]-(N//2), (N//2):wall.shape[1]-(N//2)].copy()
    return A
numpy.full(shape, fill_value, dtype=None, order='C')
wall = np.full((I_temp.shape[0]+(N//2)*2, I_temp.shape[1]+(N//2)*2), -1)
The +(N//2)*2 term adds N//2 rows/columns on each side of the image, which works much like padding.
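As a minimal sketch of what this padding step produces (a hypothetical 3x3 "image" and N = 3, chosen only for illustration):

import numpy as np

N = 3
img = np.arange(9).reshape(3, 3)   # hypothetical 3x3 image
pad = N // 2                       # border width on each side
wall = np.full((img.shape[0] + pad*2, img.shape[1] + pad*2), -1)
wall[pad:-pad, pad:-pad] = img
print(wall)
# [[-1 -1 -1 -1 -1]
#  [-1  0  1  2 -1]
#  [-1  3  4  5 -1]
#  [-1  6  7  8 -1]
#  [-1 -1 -1 -1 -1]]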
def min_filtering(N, A):
    # pad with 300 (larger than any 8-bit gray value) so that border pixels can be skipped
    wall_min = np.full((A.shape[0]+(N//2)*2, A.shape[1]+(N//2)*2), 300)
    wall_min[(N//2):wall_min.shape[0]-(N//2), (N//2):wall_min.shape[1]-(N//2)] = A.copy()
    temp_min = np.full((A.shape[0]+(N//2)*2, A.shape[1]+(N//2)*2), 300)
    for y in range(0, wall_min.shape[0]):
        for x in range(0, wall_min.shape[1]):
            if wall_min[y,x] != 300:
                # keep the minimum of the N x N window centred on (y, x)
                window_min = wall_min[y-(N//2):y+(N//2)+1, x-(N//2):x+(N//2)+1]
                num_min = np.amin(window_min)
                temp_min[y,x] = num_min
    B = temp_min[(N//2):wall_min.shape[0]-(N//2), (N//2):wall_min.shape[1]-(N//2)].copy()
    return B
def background_subtraction(I, B):
    # subtract the estimated background, then stretch the result to the full 0-255 range
    O = I - B
    norm_img = cv2.normalize(O, None, 0, 255, norm_type=cv2.NORM_MINMAX)
    return norm_img
def min_max_filtering(M, N, I):
    if M == 0:
        # max filtering then min filtering: estimates a bright background
        A = max_filtering(N, I)
        B = min_filtering(N, A)
        # subtraction
        normalised_img = background_subtraction(I, B)
    elif M == 1:
        # min filtering then max filtering: estimates a dark background
        A = min_filtering(N, I)
        B = max_filtering(N, A)
        # subtraction
        normalised_img = background_subtraction(I, B)
    return normalised_img
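A minimal usage sketch of the whole pipeline (the file name text.jpg and the window size N = 20 are just assumptions for illustration):

# load a grayscale image and flatten its uneven background
img = cv2.imread('text.jpg', cv2.IMREAD_GRAYSCALE)   # hypothetical input file
out = min_max_filtering(M=0, N=20, I=img)            # M=0: bright background
cv2.imwrite('flattened.jpg', out.astype(np.uint8))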
Finding the angle of the car
import cv2
import math

# NOTE: this example targets the legacy OpenCV 1.x API (QueryFrame, CreateImage,
# InRangeS, ...); these functions are gone from modern cv2 and are discussed below.
class Target:
    def __init__(self):
        self.capture = cv2.VideoCapture(0)
        cv2.NamedWindow("Target", 1)
        cv2.NamedWindow("Threshold1", 1)
        cv2.NamedWindow("Threshold2", 1)
        cv2.NamedWindow("hsv", 1)

    def run(self):
        # initiate font
        font = cv2.InitFont(cv2.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 3, 8)
        # instantiate images
        hsv_img = cv2.CreateImage(cv2.GetSize(cv2.QueryFrame(self.capture)), 8, 3)
        threshold_img1 = cv2.CreateImage(cv2.GetSize(hsv_img), 8, 1)
        threshold_img1a = cv2.CreateImage(cv2.GetSize(hsv_img), 8, 1)
        threshold_img2 = cv2.CreateImage(cv2.GetSize(hsv_img), 8, 1)
        i = 0
        writer = cv2.CreateVideoWriter("angle_tracking.avi", cv2.CV_FOURCC('M','J','P','G'), 30, cv2.GetSize(hsv_img), 1)
        while True:
            # capture the image from the cam
            img = cv2.QueryFrame(self.capture)
            # convert the image to HSV
            cv2.CvtColor(img, hsv_img, cv2.CV_BGR2HSV)
            # threshold the image to isolate two colors
            cv2.InRangeS(hsv_img, (165,145,100), (250,210,160), threshold_img1)  # red
            cv2.InRangeS(hsv_img, (0,145,100), (10,210,160), threshold_img1a)    # red again
            cv2.Add(threshold_img1, threshold_img1a, threshold_img1)             # combine the two red ranges
            cv2.InRangeS(hsv_img, (105,180,40), (120,260,100), threshold_img2)   # blue
            # determine the moments of the two objects
            threshold_img1 = cv2.GetMat(threshold_img1)
            threshold_img2 = cv2.GetMat(threshold_img2)
            moments1 = cv2.Moments(threshold_img1, 0)
            moments2 = cv2.Moments(threshold_img2, 0)
            area1 = cv2.GetCentralMoment(moments1, 0, 0)
            area2 = cv2.GetCentralMoment(moments2, 0, 0)
            # initialize the coordinates
            x1, y1, x2, y2 = 0, 0, 0, 0
            # there can be noise in the video so ignore objects with small areas
            if area1 > 200000:
                # the x and y coordinates of the object's center are the (1,0) and (0,1)
                # moments divided by the area
                x1 = int(cv2.GetSpatialMoment(moments1, 1, 0)/area1)
                y1 = int(cv2.GetSpatialMoment(moments1, 0, 1)/area1)
                # draw circle
                cv2.Circle(img, (x1,y1), 2, (0,255,0), 20)
                # write x and y position
                cv2.PutText(img, str(x1)+","+str(y1), (x1,y1+20), font, 255)  # draw the text
            if area2 > 100000:
                # same center-of-mass computation for the second marker
                x2 = int(cv2.GetSpatialMoment(moments2, 1, 0)/area2)
                y2 = int(cv2.GetSpatialMoment(moments2, 0, 1)/area2)
                # draw circle
                cv2.Circle(img, (x2,y2), 2, (0,255,0), 20)
                cv2.PutText(img, str(x2)+","+str(y2), (x2,y2+20), font, 255)  # draw the text
                cv2.Line(img, (x1,y1), (x2,y2), (0,255,0), 4, cv2.CV_AA)
                # draw a horizontal reference line and the angle between it and the marker line
                cv2.Line(img, (x1,y1), (cv2.GetSize(img)[0], y1), (100,100,100,100), 4, cv2.CV_AA)
                x1 = float(x1)
                y1 = float(y1)
                x2 = float(x2)
                y2 = float(y2)
                angle = int(math.atan((y1-y2)/(x2-x1))*180/math.pi)
                cv2.PutText(img, str(angle), (int(x1)+50, (int(y2)+int(y1))//2), font, 255)
            # cv2.WriteFrame(writer, img)
            # display frames to users
            cv2.ShowImage("Target", img)
            cv2.ShowImage("Threshold1", threshold_img1)
            cv2.ShowImage("Threshold2", threshold_img2)
            cv2.ShowImage("hsv", hsv_img)
            # listen for ESC or ENTER key
            c = cv2.WaitKey(7) % 0x100
            if c == 27 or c == 10:
                break
        cv2.DestroyAllWindows()

if __name__ == "__main__":
    t = Target()
    t.run()
CreateImage(size, depth, channels)
Creates an image header and allocates the image data.
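In modern cv2 an image is just a NumPy array, so CreateImage has no direct replacement; a minimal sketch of the equivalent (the frame size is a placeholder):

import numpy as np

h, w = 480, 640                       # hypothetical frame size
img = np.zeros((h, w, 3), np.uint8)   # 8-bit, 3-channel image, like CreateImage(size, 8, 3)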
QueryFrame(capture)
Combines VideoCapture::grab() and VideoCapture::retrieve() in one call. This is the most convenient way to read a video file, or to capture and decode data from a camera, returning the just-grabbed frame. If no frame was grabbed (the camera has been disconnected, or there are no more frames in the video file), the method returns false and the function returns a NULL pointer.
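In modern cv2 the same thing is done with VideoCapture.read(); a minimal sketch:

import cv2

cap = cv2.VideoCapture(0)        # camera index 0
ret, frame = cap.read()          # grab() + retrieve() in one call
if not ret:
    print("no frame grabbed")    # camera disconnected or video finished
cap.release()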
Red and blue are isolated separately. Red spans two ranges in HSV, so the two red masks are added together at the end:
#threshold the image to isolate two colors
cv2.InRangeS(hsv_img,(165,145,100),(250,210,160),threshold_img1) #red
cv2.InRangeS(hsv_img,(0,145,100),(10,210,160),threshold_img1a) #red again
cv2.Add(threshold_img1,threshold_img1a,threshold_img1) #this is combining the two limits for red
cv2.InRangeS(hsv_img,(105,180,40),(120,260,100),threshold_img2) #blue
cv.InRangeS(src, lower, upper, dst)
In practice this is essentially equivalent to InRange, except that the bounds are passed as scalars rather than arrays.
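The modern cv2 equivalent is cv2.inRange with NumPy arrays as bounds; a minimal sketch using the same red ranges as above (hsv is assumed to be an HSV image, e.g. from cv2.cvtColor):

import cv2
import numpy as np

mask_red1 = cv2.inRange(hsv, np.array([165,145,100]), np.array([250,210,160]))
mask_red2 = cv2.inRange(hsv, np.array([0,145,100]), np.array([10,210,160]))
mask_red = cv2.add(mask_red1, mask_red2)   # combine the two red ranges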
#determine the moments of the two objects
threshold_img1=cv2.GetMat(threshold_img1)
threshold_img2=cv2.GetMat(threshold_img2)
moments1=cv2.Moments(threshold_img1,0)
moments2=cv2.Moments(threshold_img2,0)
area1=cv2.GetCentralMoment(moments1,0,0)
area2=cv2.GetCentralMoment(moments2,0,0)
cv.Moments(arr, binary=0)
If binary is true, all non-zero image pixels are treated as 1. This parameter is used only with images.
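The modern cv2.moments keeps this flag as the binaryImage parameter; a minimal sketch:

import cv2
import numpy as np

mask = np.zeros((100, 100), np.uint8)
mask[20:40, 30:60] = 255                  # a filled rectangle
m = cv2.moments(mask, binaryImage=True)   # non-zero pixels counted as 1
print(m['m00'])                           # area in pixels: 20 * 30 = 600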
GetCentralMoment
GetSpatialMoment
These now appear to be obsolete and can no longer be found in the documentation, e.g. x2=int(cv2.GetSpatialMoment(moments2,1,0)/area2) and y2=int(cv2.GetSpatialMoment(moments2,0,1)/area2).
What they do: the x and y coordinates of the object's center are found by dividing the (1,0) and (0,1) moments by the area.
Here is an example showing the usage under different versions:
# cv1 (legacy API):
moments = cv.Moments(thresholded_img, 0)
area = cv.GetCentralMoment(moments, 0, 0)
# there can be noise in the video so ignore objects with small areas
if area > 100000:
    # the x and y coordinates of the center of the object we are tracking
    # are found by dividing the (1,0) and (0,1) moments by the area
    x = cv.GetSpatialMoment(moments, 1, 0)/area
    y = cv.GetSpatialMoment(moments, 0, 1)/area

# cv2:
moments = cv2.moments(thresholded_img)
area = moments['m00']
# there can be noise in the video so ignore objects with small areas
if area > 100000:
    # the x and y coordinates of the center of the object we are tracking
    # are found by dividing the (1,0) and (0,1) moments by the area
    x = moments['m10'] / area
    y = moments['m01'] / area
In OpenCV 2 and later, GetCentralMoment and GetSpatialMoment are replaced by indexing the dictionary returned by cv2.moments directly (e.g. moments['m00'], moments['m10']).
import cv2
import numpy as np
import time
import tkinter as tk
from tkinter import ttk

NORM_FONT = ("Verdana", 10)

def popupmsg(msg):
    popup = tk.Tk()
    popup.wm_title("Message")
    label = ttk.Label(popup, text=msg, font=NORM_FONT)
    label.pack(side="top", fill="x", pady=10)
    B1 = ttk.Button(popup, text="Okay", command=popup.destroy)
    B1.pack()
    popup.mainloop()

cascade_src = 'cascade/cars.xml'
video_src = 'dataset/cars.mp4'

cap = cv2.VideoCapture(video_src)
car_cascade = cv2.CascadeClassifier(cascade_src)

deltatsum = 0
n = 0
last_time = time.time()
while(1):
    # Take each frame; stop when the video runs out
    ret, frame = cap.read()
    if not ret:
        break
    # the cascade expects a grayscale input
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cars = car_cascade.detectMultiScale(gray, 1.1, 1)
    for (x,y,w,h) in cars:
        roi = cv2.rectangle(frame,(x,y),(x+w,y+h),(0,0,255),2)  # ROI is region of interest
    # blur the frame to get rid of noise. the kernel should be ODD
    frame = cv2.GaussianBlur(frame,(21,21),0)
    # Convert BGR to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # define range of color in HSV
    lower_limit = np.array([0,150,150])
    upper_limit = np.array([10,255,255])
    # Threshold the HSV image to get only the thresholded colors
    # mask is a binary image
    mask = cv2.inRange(hsv, lower_limit, upper_limit)
    # The dimensions of the kernel must be odd!
    kernel = np.ones((3,3),np.uint8)
    kernel_lg = np.ones((15,15),np.uint8)
    # erode the mask to get rid of noise
    mask = cv2.erode(mask,kernel,iterations = 1)
    # dilate it back to regain some lost area
    mask = cv2.dilate(mask,kernel_lg,iterations = 1)
    # Bitwise-AND the mask and the frame so we end up with our areas of interest and black everywhere else
    result = cv2.bitwise_and(frame,frame, mask= mask)
    thresh = mask
    # OpenCV 4.x signature: returns (contours, hierarchy)
    contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    # define a minimum area for a contour - if it's below this, ignore it
    min_area = 1000
    cont_filtered = []
    # filter out all contours below min_area
    for cont in contours:
        if cv2.contourArea(cont) > min_area:
            cont_filtered.append(cont)
            #print(cv2.contourArea(cont))
    try:
        cnt = cont_filtered[0]
        # draw the rectangle surrounding the filtered contour
        rect = cv2.minAreaRect(cnt)
        box = cv2.boxPoints(rect)
        box = np.int32(box)   # np.int0 is gone in newer NumPy versions
        cv2.drawContours(frame,[box],0,(0,0,255),2)
        rows,cols = thresh.shape[:2]
        # fit a line through the contour and draw it across the full frame width
        [vx,vy,x,y] = cv2.fitLine(cnt, cv2.DIST_L2,0,0.01,0.01)
        lefty = int((-x*vy/vx) + y)
        righty = int(((cols-x)*vy/vx)+y)
        cv2.line(frame,(cols-1,righty),(0,lefty),(0,255,0),2)
        # this would draw all the contours on the image, not just the ones from cont_filtered
        #cv2.drawContours(frame, cont_filtered, -1, (0,255,0), 3)
        M = cv2.moments(cnt)
        cx = int(M['m10']/M['m00'])
        cy = int(M['m01']/M['m00'])
        (x,y),(MA,ma),angle = cv2.fitEllipse(cnt)
        print('x= ', cx, ' y= ', cy, ' angle = ', round(rect[2],2))
        if round(rect[2],2) < -45:
            popupmsg('Lane change detected')
        #print(contours)
        #print('there are contours')
    except (IndexError, cv2.error):
        # no contour passed the area filter, or the fit failed
        print('no contours')
    cv2.imshow('frame',frame)
    #cv2.imshow('mask', mask)
    #cv2.imshow('thresh',thresh)
    #cv2.imshow('im2', im2)
    cv2.imshow('result', result)
    k = cv2.waitKey(5) & 0xFF
    if k == ord('q'):
        break
    # track the processing rate
    deltat = time.time() - last_time
    last_time = time.time()
    deltatsum += deltat
    n += 1
    freq = round(1/(deltatsum/n), 2)
    # print('Updating at ' + str(freq) + ' FPS\r', end='')

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
import tkinter as tk
from tkinter import ttk
tkinter is used here as the classic UI toolkit; a detailed look below!!!
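As a minimal sketch of the pattern used in popupmsg above (the window title and button text are just placeholders):

import tkinter as tk
from tkinter import ttk

root = tk.Tk()                      # top-level window
root.wm_title("Demo")
ttk.Label(root, text="Hello from ttk").pack(side="top", fill="x", pady=10)
ttk.Button(root, text="Close", command=root.destroy).pack()
root.mainloop()                     # blocks until the window is closed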
1. CascadeClassifier (cascade classifier)
2. The detectMultiScale function
It has several overloads, and the parameter forms differ somewhat between C++ and Python; see the sketch below.
The input should be a grayscale image.
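A minimal detection sketch in Python (the cascade path is the one assumed above; the test-frame path, scaleFactor and minNeighbors values are just common starting points, not values from the original):

import cv2

car_cascade = cv2.CascadeClassifier('cascade/cars.xml')
img = cv2.imread('dataset/car_frame.jpg')        # hypothetical test frame
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)     # detectMultiScale expects grayscale
# returns a list of (x, y, w, h) bounding boxes
cars = car_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=3)
for (x, y, w, h) in cars:
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)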