Definition of image segmentation: separating the foreground objects from the background.
Image segmentation methods:
Traditional methods (watershed, GrabCut, MeanShift, background subtraction; covered in these notes)
Deep-learning-based methods
Watershed segmentation. Problem: an image usually contains many tiny local minima, each producing its own small catchment basin, so a plain watershed over-segments the image into fragments that are hard to work with. Marker-based watershed avoids this.
Processing steps and the key OpenCV functions:
watershed(img, markers)
Distance transform: distanceTransform(img, distanceType, maskSize)
Connected components: connectedComponents(img, connectivity, ...)
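Before the full pipeline below, a minimal sketch (toy 8x8 binary image with assumed values) of what distanceTransform and connectedComponents return, which makes the marker image passed to watershed easier to interpret:

import cv2
import numpy as np

# Toy binary image: two white blobs on a black background (assumed data)
toy = np.zeros((8, 8), np.uint8)
toy[1:4, 1:4] = 255
toy[4:7, 5:8] = 255

# Distance from each white pixel to the nearest black pixel; blob centers get the largest values
dist = cv2.distanceTransform(toy, cv2.DIST_L2, 5)

# Label the blobs: 0 = background, 1..N = individual blobs; these labels become watershed markers
num_labels, labels = cv2.connectedComponents(toy)
print(num_labels)   # 3: background plus two blobs
print(dist.max())   # largest distance, reached at a blob center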
import cv2
import numpy as np
from matplotlib import pyplot as plt

# Get the background
# 1. Binarize the image (Otsu) to get a black-and-white image
# 2. Use morphology to extract a sure-background region
img = cv2.imread('water.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

# Opening to remove small noise
kernel = np.ones((3, 3), np.uint8)
open1 = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)

# Dilate: everything outside the dilated region is sure background
bg = cv2.dilate(open1, kernel, iterations=1)

# Get the sure-foreground objects via the distance transform
dist = cv2.distanceTransform(open1, cv2.DIST_L2, 5)
ret, fg = cv2.threshold(dist, 0.7 * dist.max(), 255, 0)

# Unknown region = sure background minus sure foreground
fg = np.uint8(fg)
unknown = cv2.subtract(bg, fg)

# Create markers from connected components of the sure foreground
ret, markers = cv2.connectedComponents(fg)
markers = markers + 1           # shift labels so the background becomes 1 instead of 0
markers[unknown == 255] = 0     # label the unknown region 0 and let watershed decide it

# Run the watershed; boundary pixels come back labeled -1
result = cv2.watershed(img, markers)
img[result == -1] = [0, 0, 255]   # draw the boundaries in red

cv2.imshow('thresh', thresh)
cv2.imshow('img', img)
cv2.waitKey(0)
import cv2
import numpy as np


class App:
    flag_rect = False            # True while the left mouse button is held down
    rect = (0, 0, 0, 0)          # user-selected ROI: (x, y, w, h)
    startX = 0
    startY = 0

    def onmouse(self, event, x, y, flag, param):
        if event == cv2.EVENT_LBUTTONDOWN:
            self.flag_rect = True
            self.startX = x
            self.startY = y
        elif event == cv2.EVENT_LBUTTONUP:
            self.flag_rect = False
            cv2.rectangle(self.img, (self.startX, self.startY), (x, y), (0, 0, 255), 3)
            self.rect = (min(self.startX, x), min(self.startY, y),
                         abs(self.startX - x), abs(self.startY - y))
        elif event == cv2.EVENT_MOUSEMOVE:
            if self.flag_rect:
                self.img = self.img2.copy()
                cv2.rectangle(self.img, (self.startX, self.startY), (x, y), (255, 0, 0), 3)
        print("onmouse")   # debug output

    def run(self):
        cv2.namedWindow('input')
        cv2.setMouseCallback('input', self.onmouse)
        self.img = cv2.imread('lena.png')
        self.img2 = self.img.copy()
        self.mask = np.zeros(self.img.shape[:2], dtype=np.uint8)
        self.output = np.zeros(self.img.shape, np.uint8)
        while True:
            cv2.imshow('input', self.img)
            cv2.imshow('output', self.output)
            k = cv2.waitKey(100)
            if k == 27:          # Esc quits
                break
            if k == ord('g'):    # 'g' runs GrabCut on the selected rectangle
                bgdModel = np.zeros((1, 65), np.float64)
                fgdModel = np.zeros((1, 65), np.float64)
                cv2.grabCut(self.img2, self.mask, self.rect,
                            bgdModel, fgdModel, 1, cv2.GC_INIT_WITH_RECT)
            # GC_FGD (1) and GC_PR_FGD (3) are foreground; everything else is background
            mask2 = np.where((self.mask == 1) | (self.mask == 3), 255, 0).astype('uint8')
            self.output = cv2.bitwise_and(self.img2, self.img2, mask=mask2)


App().run()
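For reference, a minimal non-interactive sketch of the same cv2.grabCut call with a hard-coded rectangle instead of the mouse-driven UI (the filename and ROI values are assumptions):

import cv2
import numpy as np

img = cv2.imread('lena.png')              # assumed input image
mask = np.zeros(img.shape[:2], np.uint8)
bgdModel = np.zeros((1, 65), np.float64)
fgdModel = np.zeros((1, 65), np.float64)

rect = (50, 50, 300, 300)                 # assumed ROI enclosing the foreground object
cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_RECT)

# Keep definite and probable foreground (labels 1 and 3), drop the rest
mask2 = np.where((mask == 1) | (mask == 3), 255, 0).astype('uint8')
result = cv2.bitwise_and(img, img, mask=mask2)

cv2.imshow('result', result)
cv2.waitKey(0)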
MeanShift segmentation: smooth the colors with pyrMeanShiftFiltering(src, sp, sr), then run Canny and find contours on the flattened image.
mean_img = cv2.pyrMeanShiftFiltering(img, 20, 30)   # sp = spatial radius, sr = color radius
imgCanny = cv2.Canny(mean_img, 150, 300)
contours, _ = cv2.findContours(imgCanny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
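A self-contained sketch of the same flow that also draws the detected contours back onto the original image (the input filename is an assumption):

import cv2

img = cv2.imread('lena.png')                      # assumed input image

# Mean-shift filtering flattens color regions, which gives cleaner edges
mean_img = cv2.pyrMeanShiftFiltering(img, 20, 30)

# Edge detection on the smoothed image, then external contours only
imgCanny = cv2.Canny(mean_img, 150, 300)
contours, _ = cv2.findContours(imgCanny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

# Draw all contours on the original image in red and display
cv2.drawContours(img, contours, -1, (0, 0, 255), 2)
cv2.imshow('result', img)
cv2.waitKey(0)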
A video is a sequence of consecutive frames.
Adjacent frames are closely related; they are grouped into a GOP (group of pictures).
Within a GOP the background is almost unchanged, which is what background subtraction exploits to extract the moving foreground.
import cv2

cap = cv2.VideoCapture('vtest.avi')
# MOG background subtractor (lives in the opencv-contrib module cv2.bgsegm)
mog = cv2.bgsegm.createBackgroundSubtractorMOG()

while True:
    ret, frame = cap.read()
    if not ret:
        break
    fgmask = mog.apply(frame)    # foreground mask: moving objects in white
    cv2.imshow('img', fgmask)
    k = cv2.waitKey(10)
    if k == 27:                  # Esc quits
        break

cap.release()
cv2.destroyAllWindows()
MOG2: similar to MOG, but recognizes shadows caused by changes in illumination better.
Advantage: can detect the shadow regions.
Disadvantage: produces a lot of noise.
createBackgroundSubtractorMOG2(history, detectShadows)
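A minimal sketch using MOG2 (note it lives in the main cv2 module rather than cv2.bgsegm; shadows appear gray in the mask):

import cv2

cap = cv2.VideoCapture('vtest.avi')
mog2 = cv2.createBackgroundSubtractorMOG2(history=500, detectShadows=True)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    fgmask = mog2.apply(frame)   # foreground = 255, shadows = 127, background = 0
    cv2.imshow('MOG2', fgmask)
    if cv2.waitKey(10) == 27:
        break

cap.release()
cv2.destroyAllWindows()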
GMG: estimates a static background image and performs per-pixel Bayesian segmentation; more robust to noise.
createBackgroundSubtractorGMG(initializationFrames)
Advantages: can detect shadow regions and produces less noise.
Disadvantage: with the default settings, no output appears for the first stretch of the video, because those frames are used to initialize the model.
Workaround: reduce the number of initialization frames (the initializationFrames parameter).
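A minimal sketch using GMG (also in cv2.bgsegm). The initializationFrames value below is an assumption chosen so output appears sooner than with the default of 120, and an opening pass cleans up residual speckle noise:

import cv2

cap = cv2.VideoCapture('vtest.avi')
gmg = cv2.bgsegm.createBackgroundSubtractorGMG(initializationFrames=20)  # default is 120
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))

while True:
    ret, frame = cap.read()
    if not ret:
        break
    fgmask = gmg.apply(frame)
    # Opening removes the small speckles the raw mask still contains
    fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
    cv2.imshow('GMG', fgmask)
    if cv2.waitKey(10) == 27:
        break

cap.release()
cv2.destroyAllWindows()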
Image inpainting (repairing a damaged region given a mask of the damage): dst = cv2.inpaint(src, inpaintMask, inpaintRadius, flags)
dst = cv2.inpaint(img, mask, 5, cv2.INPAINT_TELEA)
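A minimal sketch (image and mask filenames are assumptions; the mask must be a single-channel image in which the damaged pixels are non-zero):

import cv2

img = cv2.imread('inpaint.png')                                 # assumed damaged image
mask = cv2.imread('inpaint_mask.png', cv2.IMREAD_GRAYSCALE)     # assumed damage mask (white = damaged)

# Radius-5 neighborhood, Telea's fast marching method (cv2.INPAINT_NS is the alternative flag)
dst = cv2.inpaint(img, mask, 5, cv2.INPAINT_TELEA)

cv2.imshow('original', img)
cv2.imshow('inpainted', dst)
cv2.waitKey(0)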