通过金字塔Lucas-Kanade 光流方法计算某些点集的光流(稀疏光流)。
相关论文:"Pyramidal Implementation of the Lucas Kanade Feature Tracker — Description of the algorithm"
环境:python3+opencv3
#coding=utf-8
import numpy as np
import cv2
# from common import anorm2, draw_str
from time import clock
import cmath
# Parameters for cv2.calcOpticalFlowPyrLK (pyramidal Lucas-Kanade):
# winSize  : search-window size at each pyramid level.
# maxLevel : number of pyramid levels above the base image.
# criteria : stop after 10 iterations or when the update falls below 0.03.
lk_params = dict(winSize=(15, 15),
                 maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

# Parameters for cv2.goodFeaturesToTrack (Shi-Tomasi corner detection):
# maxCorners        : maximum number of keypoints to return.
# qualityLevel      : minimal accepted corner strength (relative to the best corner).
# minDistance       : minimum pixel distance between returned keypoints.
# blockSize         : size of the neighborhood used to score each pixel.
# useHarrisDetector : use the Harris detector instead of the min-eigenvalue criterion.
# k                 : free parameter of the Harris detector.
feature_params = dict(maxCorners=5000000,
                      qualityLevel=0.1,
                      minDistance=7,
                      blockSize=7)
class App:
    """Sparse optical-flow tracker using pyramidal Lucas-Kanade.

    Detects Shi-Tomasi corners, tracks them frame-to-frame with
    cv2.calcOpticalFlowPyrLK, validates tracks by forward/backward
    tracking, draws the surviving points, and logs per-frame point
    correspondences plus average pixel displacement.
    """

    def __init__(self, video_src):
        # Constructor: initialize tracking parameters and open the video source.
        self.track_len = 10          # maximum number of positions kept per track
        self.detect_interval = 1     # (re)detect features every N frames
        self.tracks = []             # list of tracks; each track is a list of (x, y) tuples
        self.cam = cv2.VideoCapture(video_src)
        self.frame_idx = 0           # index of the current frame
        self.num = 0                 # NOTE(review): never used in this class
        self.i = 0                   # NOTE(review): never used in this class
        self.all_distance = 0        # running sum of per-frame average displacements
        self.count = 0               # number of frames that produced a displacement

    def run(self):
        # Main optical-flow loop: one iteration per video frame.
        while True:
            ret, frame = self.cam.read()  # read the next video frame
            if ret == True:
                frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # convert to grayscale
                # vis = frame.copy()
                h, w = frame.shape[:2]
                vis = np.ones((h, w), )  # blank canvas for drawing tracked points
                # NOTE(review): opened with 'w' on every frame, so the file only
                # ever contains the current frame's correspondences.
                f = open('./F/8/shuibo_8_LK(x1,y1,x2,y2).txt','w')
                if len(self.tracks) > 0:  # corners exist: track them into this frame
                    img0, img1 = self.prev_gray, frame_gray
                    p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)
                    """
                    nextPts, status, err = calcOpticalFlowPyrLK(prevImg, nextImg, prevPts[, nextPts[, status[,
                        err[, winSize[, maxLevel[, criteria[, flags[, minEigThreshold]]]]]]]])
                    Parameters:
                        prevImg   previous 8-bit frame
                        nextImg   current 8-bit frame
                        prevPts   vector of feature points to track
                        nextPts   output vector of tracked feature points
                        status    1 if the corresponding feature was found, 0 otherwise
                        err       output error vector
                        winSize   search-window size
                        maxLevel  maximal pyramid level number
                        flags     optional: OPTFLOW_USE_INITIAL_FLOW, OPTFLOW_LK_GET_MIN_EIGENVALS
                    """
                    # Forward pass: previous-frame corners + current frame -> positions in the current frame.
                    p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None,
                                                           **lk_params)
                    # Backward pass: track p1 back into the previous frame for validation.
                    p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None,
                                                            **lk_params)
                    # Max per-point deviation between each original corner and its back-tracked position.
                    d = abs(p0 - p0r).reshape(-1, 2).max(-1)
                    # good = d < 1  # original rule: deviation >= 1 px means a bad track
                    # NOTE(review): with `good = d` the threshold is effectively disabled —
                    # any nonzero deviation is truthy, so almost every point is kept.
                    good=d
                    new_tracks = []
                    for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good):  # keep successfully tracked points
                        if not good_flag:
                            continue
                        tr.append((x, y))  # extend the track with the current-frame position (x, y)
                        if len(tr) > self.track_len:
                            del tr[0]      # cap the track's history length
                        new_tracks.append(tr)
                        cv2.circle(vis, (x, y), 2, (0, 255, 0), -1)  # mark the tracked point
                    self.tracks = new_tracks  # each surviving track ends with (previous point, current point)
                    # print(self.tracks[0])
                    # print(self.tracks[1])
                    distance = 0
                    for tr in self.tracks:
                        # tr[0]=list(tr[0])
                        # tr[1]=list(tr[1])
                        x1=tr[0][0]
                        y1=tr[0][1]
                        x2 = tr[1][0]
                        y2 = tr[1][1]
                        # Log "x1 y1 x2 y2" point correspondence for this frame.
                        f.writelines([ str(x1), ' ', str(y1), ' ', str(x2), ' ', str(y2),'\n'])
                        # Euclidean displacement; cmath.sqrt returns a complex number,
                        # hence the .real accesses below.
                        dis=cmath.sqrt((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1))
                        # number of successfully tracked points
                        print(len(self.tracks))
                        # pixel displacement of this tracked point
                        print(dis.real)
                        distance=distance+dis
                    # NOTE(review): raises ZeroDivisionError if no track survived this frame.
                    distance=distance/len(self.tracks)
                    self.all_distance=self.all_distance+distance
                    self.count=self.count+1
                    print("每一帧像素点平均位移:",distance,"第几帧:",self.count)
                    print("所有帧平均位移:",(self.all_distance/self.count).real)
                    f.close()
                if self.frame_idx % self.detect_interval == 0:  # detect features every detect_interval frames
                    mask = np.zeros_like(frame_gray)  # mask the same size as the frame
                    mask[:] = 255                     # 255 everywhere: detect over the whole image...
                    for x, y in [np.int32(tr[-1]) for tr in self.tracks]:  # ...except around points already tracked
                        cv2.circle(mask, (x, y), 5, 0, -1)
                    p = cv2.goodFeaturesToTrack(frame_gray, mask=mask, **feature_params)  # corner detection
                    if p is not None:
                        for x, y in np.float32(p).reshape(-1, 2):
                            self.tracks.append([(x, y)])  # queue each detected corner as a new track
                self.frame_idx += 1
                self.prev_gray = frame_gray
                cv2.imshow('lk_track', vis)
            ch = 0xFF & cv2.waitKey(1)
            if ch == 27:  # ESC quits
                # cv2.imwrite("./mashiti-result4.png", vis)
                break
def main():
    """Entry point: take the video path from argv[1], or fall back to a default clip."""
    import sys
    try:
        video_src = sys.argv[1]
    except IndexError:  # narrowed from a bare `except:` — only a missing argument is expected
        video_src = "./F/8/shuibo_8.avi"
    # Bug fix: the original had `print` and `__doc__` on separate lines, which in
    # Python 3 are two no-op expression statements; the intent was to print the doc.
    print(__doc__)
    App(video_src).run()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    main()
用Gunnar Farneback 的算法计算稠密光流(即图像上全部像素点的光流都计算出来)
相关论文:"Two-Frame Motion Estimation Based on Polynomial Expansion"
环境:python3+opencv3
#coding=utf-8
import cv2
import numpy as np
from numpy import *
import cmath
from skimage import transform
import matplotlib.pyplot as plt
cap = cv2.VideoCapture("./F/8/shuibo_8.avi")
i=0
num=0
number = 0
# Prime the loop: read the first frame and convert it to grayscale
# so the main loop always has a "previous" frame to compare against.
ret, frame1 = cap.read()
prvs = cv2.cvtColor(frame1,cv2.COLOR_BGR2GRAY)
# HSV canvas for the flow visualization; saturation fixed at full.
hsv = np.zeros_like(frame1)
hsv[...,1] = 255
def draw_flow(im, flow, step=1):
    """Render a dense flow field as dots + arrows on a fresh canvas.

    im   : grayscale image whose shape defines the canvas size.
    flow : per-pixel (fx, fy) displacement field from calcOpticalFlowFarneback.
    step : sample one flow vector every `step` pixels in each direction.

    Returns the canvas with one circle at each sampled point and an arrow to
    its displaced position.

    NOTE(review): also writes one "x y fx fy" line per sampled point to the
    module-global file handle `f` — an implicit dependency on the caller
    having opened `f` beforehand.
    """
    h, w = im.shape[:2]
    # Sample grid centered in each step x step cell (offsets of step/2).
    y, x = np.mgrid[step / 2:h:step, step / 2:w:step].reshape(2, -1)
    x = x.astype('int64')
    y = y.astype('int64')
    fx, fy = flow[y, x].T
    # Bug fix: iterate over the actual number of sampled points, not h*w.
    # The two only coincide when step == 1; any other step made the original
    # index past the end of x/y/fx/fy.
    for index in range(len(x)):
        f.writelines([str(x[index]), ' ', str(y[index]), ' ', str(fx[index]), ' ', str(fy[index]), '\n'])
    # Line endpoints: start at each sample, end displaced by its flow vector.
    lines = np.vstack([x, y, x + fx, y + fy]).T.reshape(-1, 2, 2)
    lines = np.int32(lines)
    vis = np.ones((h, w), )
    for (x1, y1), (x2, y2) in lines:
        cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
        cv2.arrowedLine(vis, (x1, y1), (x2, y2), color=(0, 0, 255), tipLength=1)
    return vis
while(1):
    ret, frame2 = cap.read()
    # Bug fix: cap.read() returns (False, None) at end of stream; the original
    # passed None straight into cvtColor and crashed. Exit the loop cleanly.
    if not ret:
        break
    next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
    h, w = next.shape[:2]
    # Two-channel flow field: per-pixel displacement
    # (x component in flow[..., 0], y component in flow[..., 1]).
    flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 3, 5, 1.2, 0)
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])  # Cartesian -> polar
    hsv[..., 0] = ang * 180 / np.pi / 2                     # hue encodes direction
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)  # value encodes magnitude
    rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    # plt=draw_flow(next, flow)
    # plt.show()
    # NOTE(review): opened with 'w' each iteration, so the file only ever
    # holds the most recent frame's flow vectors (written inside draw_flow).
    f=open('./F/8/shuibo_8_Farneback(x,y,fx,fy).txt','w')
    vis = np.ones((h,w), )
    vis=draw_flow(next, flow)
    f.close()
    cv2.imshow('Optical flow', vis)  # show flow as arrows
    # cv2.imshow('frame2', rgb)      # alternative: HSV-encoded flow
    prvs = next
    cv2.waitKey()
cv2.destroyAllWindows()
通过块匹配的方法来计算光流。
环境:python2+opencv2
import cv
import sys
import cv2
import numpy as np
# Sample every FLOWSKIP-th pixel when drawing and saving flow vectors.
FLOWSKIP = 1
# if len(sys.argv) != 3:
# sys.exit("Please input two arguments: imagename1 imagename2")
# inputImageFirst = cv.LoadImage(sys.argv[1], cv.CV_LOAD_IMAGE_GRAYSCALE)
# inputImageSecond = cv.LoadImage(sys.argv[2], cv.CV_LOAD_IMAGE_GRAYSCALE)
inputImageFirst = cv.LoadImage('./F/0.5/shuibo_9.jpg', cv.CV_LOAD_IMAGE_GRAYSCALE)
inputImageSecond = cv.LoadImage('./F/0.5/shuibo_10.jpg', cv.CV_LOAD_IMAGE_GRAYSCALE)
# desImageHS = cv.LoadImage(sys.argv[1], cv.CV_LOAD_IMAGE_COLOR)
# desImageLK = cv.LoadImage(sys.argv[1], cv.CV_LOAD_IMAGE_COLOR)
desImageHS = cv.LoadImage('./F/0.5/shuibo_9.jpg', cv.CV_LOAD_IMAGE_COLOR)
desImageLK = cv.LoadImage('./F/0.5/shuibo_9.jpg', cv.CV_LOAD_IMAGE_COLOR)
cols = inputImageFirst.width
rows = inputImageFirst.height
print cols,rows
# Per-pixel flow components (32-bit float, single channel), zero-initialized.
velx = cv.CreateMat(rows, cols, cv.CV_32FC1)
vely = cv.CreateMat(rows, cols, cv.CV_32FC1)
cv.SetZero(velx)
cv.SetZero(vely)
# Block-matching optical flow: block size (1,1), shift (1,1), max search
# range (1,1), usePrevious=0, results written into velx/vely.
cv.CalcOpticalFlowBM(inputImageFirst, inputImageSecond, (1,1),(1,1),(1,1) ,0,velx, vely)
f=open('./F/0.5/shuibo_05_BM(x1,y1,x1,y2).txt','w')  # save the flow vectors to a txt file
count=0
for i in range(0, cols, FLOWSKIP):
    for j in range(0, rows, FLOWSKIP):
        # Note: GetReal2D takes (row, col), hence (j, i).
        dx = int(cv.GetReal2D(velx, j, i))
        dy = int(cv.GetReal2D(vely, j, i))
        # Draw the displacement as an anti-aliased line on the result image.
        cv.Line(desImageHS, (i, j), (i + dx, j + dy), (0, 0, 255), 1, cv.CV_AA, 0)
        f.writelines([ str(i), ' ', str(j), ' ', str(i + dx), ' ', str(j + dy),'\n'])
        # count+=1
        # print count
f.close()
cv.SaveImage("resultHS.png", desImageHS)
cv.NamedWindow("Optical flow HS")
cv.ShowImage("Optical flow HS", desImageHS)
cv.WaitKey(0)
cv.DestroyAllWindows()
用Horn-Schunck 的算法计算稠密光流。
环境:python2+opencv2
import cv
import sys
import cv2
import numpy as np
# Sample every FLOWSKIP-th pixel when drawing and saving flow vectors.
FLOWSKIP = 1
# if len(sys.argv) != 3:
# sys.exit("Please input two arguments: imagename1 imagename2")
# inputImageFirst = cv.LoadImage(sys.argv[1], cv.CV_LOAD_IMAGE_GRAYSCALE)
# inputImageSecond = cv.LoadImage(sys.argv[2], cv.CV_LOAD_IMAGE_GRAYSCALE)
inputImageFirst = cv.LoadImage('./A/8.0/shuibo_9.jpg', cv.CV_LOAD_IMAGE_GRAYSCALE)
inputImageSecond = cv.LoadImage('./A/8.0/shuibo_10.jpg', cv.CV_LOAD_IMAGE_GRAYSCALE)
# desImageHS = cv.LoadImage(sys.argv[1], cv.CV_LOAD_IMAGE_COLOR)
# desImageLK = cv.LoadImage(sys.argv[1], cv.CV_LOAD_IMAGE_COLOR)
desImageHS = cv.LoadImage('./A/8.0/shuibo_9.jpg', cv.CV_LOAD_IMAGE_COLOR)
desImageLK = cv.LoadImage('./A/8.0/shuibo_9.jpg', cv.CV_LOAD_IMAGE_COLOR)
cols = inputImageFirst.width
rows = inputImageFirst.height
# Per-pixel flow components (32-bit float, single channel), zero-initialized.
velx = cv.CreateMat(rows, cols, cv.CV_32FC1)
vely = cv.CreateMat(rows, cols, cv.CV_32FC1)
cv.SetZero(velx)
cv.SetZero(vely)
# Horn-Schunck dense optical flow: smoothness weight 100.0; stop after
# 64 iterations or once the per-iteration change drops below 0.01.
cv.CalcOpticalFlowHS(inputImageFirst, inputImageSecond, False, velx, vely, 100.0,
    (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 64, 0.01))
# Save the HS flow vectors to a txt file, one "x1 y1 x2 y2" line per pixel.
f=open('./A/8.0/shuibo_8_HS(x1,y1,x1,y2).txt','w')
count=0
for i in range(0, cols, FLOWSKIP):
    for j in range(0, rows, FLOWSKIP):
        # Note: GetReal2D takes (row, col), hence (j, i).
        dx = int(cv.GetReal2D(velx, j, i))
        dy = int(cv.GetReal2D(vely, j, i))
        # Draw the displacement as an anti-aliased line on the HS result image.
        cv.Line(desImageHS, (i, j), (i + dx, j + dy), (0, 0, 255), 1, cv.CV_AA, 0)
        f.writelines([ str(i), ' ', str(j), ' ', str(i + dx), ' ', str(j + dy),'\n'])
        # count+=1
        # print count
f.close()
cv.SetZero(velx)
cv.SetZero(vely)
# Dense Lucas-Kanade optical flow with a 15x15 window, for comparison.
cv.CalcOpticalFlowLK(inputImageFirst, inputImageSecond, (15, 15), velx, vely)
for i in range(0, cols, FLOWSKIP):
    for j in range(0, rows, FLOWSKIP):
        dx = int(cv.GetReal2D(velx, j, i))
        dy = int(cv.GetReal2D(vely, j, i))
        cv.Line(desImageLK, (i, j), (i + dx, j + dy), (0, 0, 255), 1, cv.CV_AA, 0)
cv.SaveImage("resultHS.png", desImageHS)
cv.SaveImage("resultLK.png", desImageLK)
cv.NamedWindow("Optical flow HS")
cv.ShowImage("Optical flow HS", desImageHS)
cv.NamedWindow("Optical flow LK")
cv.ShowImage("Optical flow LK", desImageLK)
cv.WaitKey(0)
cv.DestroyAllWindows()