图像分割: 将前景物体从背景中分离出来。
图像分割的方法:
1、分水岭法的原理:
2、分水岭法的问题:
图像存在过多的极小区域,从而产生许多小的集水盆。
3、分水岭法的基本步骤:
4、实战:分割硬币
watershed()
用法:
cv2.watershed(image, markers)
参数说明:
原图像:
获取背景:
获取前景:
获取未知域:
4.1 获取背景:
# Get the sure background region
# 1. Binarize the grayscale image (inverted + Otsu picks the threshold automatically)
# 2. Clean it up with morphology
ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
# Opening (erode then dilate) removes small noise specks
kernel = np.ones((3, 3), np.int8)  # NOTE(review): np.uint8 is the conventional kernel dtype for OpenCV — confirm
open1 = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)
# Dilation grows the objects so everything outside is certainly background
bg = cv2.dilate(open1, kernel, iterations=1)
cv2.imshow('thresh', thresh)
cv2.imshow('bg', bg)
4.2 获取前景:
距离变换:distanceTransform()
用法
cv2.distanceTransform(src, distanceType, maskSize, dst: None, dstType: None)
参数说明:
# Get the sure foreground region
# Distance transform: each nonzero pixel gets its L2 distance to the nearest zero pixel
dist = cv2.distanceTransform(open1, cv2.DIST_L2, 5)
# Keep only pixels far from the background (>= 70% of the max distance) as sure foreground
ret, fg = cv2.threshold(dist, 0.7*dist.max(), 255, cv2.THRESH_BINARY)
# plt.imshow(dist, cmap='gray')
# plt.show()
# exit()
cv2.imshow('dist', dist)
cv2.imshow('fg', fg)
4.3 获取未知域:
求连通域:connectedComponents()
用法
cv2.connectedComponents(image, labels: None, connectivity: None, ltype: None)
参数说明:
# Get the unknown region: background candidate minus sure foreground
fg = np.uint8(fg)  # threshold on a float image yields float32; subtract needs uint8
unknown = cv2.subtract(bg, fg)
# Label the connected components of the sure foreground
# (connectedComponents labels the background 0 and components 1..N)
ret, marker = cv2.connectedComponents(fg)
# BUG FIX: cv2.watershed reserves label 0 for "unknown", so shift all
# labels up by one and mark the unknown band with 0; without this the
# background label 0 would be flooded as if it were unknown territory.
marker += 1
marker[unknown == 255] = 0
cv2.imshow('unknown', unknown)
4.4 进行分割:
# Perform the segmentation
# watershed() floods outward from the markers; boundary pixels come back labeled -1
result = cv2.watershed(img, marker)
# Paint the watershed boundaries red (BGR order)
img[result == -1] = [0, 0, 255]
cv2.imshow('img', img)
代码实现(完整代码):
# Watershed segmentation of coins (complete script).
# Pipeline: Otsu threshold -> morphological cleanup -> sure background (dilate)
# -> sure foreground (distance transform + threshold) -> unknown band
# -> connected-component markers -> cv2.watershed -> draw boundaries.
import cv2
import numpy as np

img = cv2.imread('../resource/money.png')
if img is None:
    # Fail fast with a clear message instead of a confusing cvtColor error
    raise FileNotFoundError('cannot read ../resource/money.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# 1. Binarize: inverted + Otsu makes the coins white on a black background
ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
# 2. Opening removes small noise specks.
#    BUG FIX: the structuring element must be unsigned 8-bit (was np.int8).
kernel = np.ones((3, 3), np.uint8)
open1 = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)
# 3. Dilation grows the objects so everything outside is certainly background
bg = cv2.dilate(open1, kernel, iterations=1)

# 4. Sure foreground: pixels far (>= 70% of the max distance) from any background pixel
dist = cv2.distanceTransform(open1, cv2.DIST_L2, 5)
ret, fg = cv2.threshold(dist, 0.7 * dist.max(), 255, cv2.THRESH_BINARY)

# 5. Unknown band = background candidate minus sure foreground
fg = np.uint8(fg)  # threshold on a float image yields float32; subtract needs uint8
unknown = cv2.subtract(bg, fg)

# 6. Markers: components 1..N+1, background 1, unknown 0
#    (watershed reserves label 0 for "unknown")
ret, marker = cv2.connectedComponents(fg)
marker += 1
marker[unknown == 255] = 0

# 7. Watershed floods the unknown band; boundaries come back labeled -1
result = cv2.watershed(img, marker)
img[result == -1] = [0, 0, 255]  # paint boundaries red (BGR)

cv2.imshow('img', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
GrabCut:通过交互的方式获得前景物体。
基本原理:
(1):用户可以指定前景的大体区域,剩下的为背景区域。
(2):用户还可以明确指定某些地方为前景或背景。
(3):GrabCut采用分段迭代的方法分析前景物体形成模型树。
(4):最后根据权重决定某个像素是前景还是背景。
实战步骤:
(1):主体结构
(2):鼠标事件的处理
(3):调用GrabCut实现图像分割
1、主体结构:
class App:
    # Step 1 skeleton of the GrabCut demo: create a window, register a
    # mouse callback, display an image, and wait for a key press.
    def onmouse(self, event, x, y, flags, param):
        # Placeholder handler: just logs that a mouse event arrived.
        print('onmouse')
    def run(self):
        # Wire the window and callback together, then block until a key is pressed.
        print('run')
        cv2.namedWindow('input')
        cv2.setMouseCallback('input', self.onmouse)
        img = cv2.imread('../resource/lena.bmp')
        cv2.imshow('input', img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
App().run()
2、鼠标事件的处理
class App:
    # Step 2 of the GrabCut demo: draw a rubber-band selection rectangle
    # with the mouse (green while dragging, red when released).
    flag_rect = False  # True while the left button is held down
    startX = 0         # drag start corner (set on button-down)
    startY = 0
    def onmouse(self, event, x, y, flags, param):
        if event == cv2.EVENT_LBUTTONDOWN:
            self.flag_rect = True
            self.startX = x
            self.startY = y
            print('LBUTTONDOWN: 左键按下')
        elif event == cv2.EVENT_LBUTTONUP:
            self.flag_rect = False
            # Final rectangle drawn in red on the displayed image
            cv2.rectangle(self.img, (self.startX, self.startY), (x, y), (0, 0, 255), 3)
            print('LBUTTONUP: 左键抬起')
        elif event == cv2.EVENT_MOUSEMOVE:
            if self.flag_rect == True:
                # Restore the clean copy so only one green preview rectangle shows
                self.img = self.img2.copy()
                cv2.rectangle(self.img, (self.startX, self.startY), (x, y), (0, 255, 0), 3)
            print('MOUSEMOVE: 鼠标移动')
        print('onmouse')
    def run(self):
        print('run')
        cv2.namedWindow('input')
        cv2.setMouseCallback('input', self.onmouse)
        self.img = cv2.imread('../resource/lena.bmp')
        self.img2 = self.img.copy()  # pristine copy used to erase the preview rectangle
        while (1):
            # Redraw continuously so rectangle updates from the callback become visible
            cv2.imshow('input', self.img)
            key = cv2.waitKey(100)
            if key == 27:  # Esc quits
                break
App().run()
3、调用GrabCut实现图像分割
grabCut()
用法:
cv2.grabCut(img, mask, rect, bgdModel, fgdModel, iterCount, mode: None)
参数说明:
代码实现(完整代码):
import cv2
import numpy as np
class App:
    """Interactive GrabCut demo.

    Drag a rectangle around the foreground in the 'input' window, then
    press 'g' to run GrabCut; the extracted foreground appears in the
    'output' window. Press Esc to quit.
    """

    flag_rect = False    # True while the left button is held down
    rect = (0, 0, 0, 0)  # user-drawn rectangle as (x, y, w, h)
    startX = 0           # drag start corner (set on button-down)
    startY = 0

    def onmouse(self, event, x, y, flags, param):
        # Rubber-band rectangle: green preview while dragging, red when released.
        if event == cv2.EVENT_LBUTTONDOWN:
            self.flag_rect = True
            self.startX = x
            self.startY = y
            print('LBUTTONDOWN: 左键按下')
        elif event == cv2.EVENT_LBUTTONUP:
            self.flag_rect = False
            cv2.rectangle(self.img, (self.startX, self.startY), (x, y), (0, 0, 255), 3)
            # Normalize so the rect is valid whichever direction the user dragged
            self.rect = (min(self.startX, x), min(self.startY, y),
                         abs(self.startX - x), abs(self.startY - y))
            print('LBUTTONUP: 左键抬起')
        elif event == cv2.EVENT_MOUSEMOVE:
            if self.flag_rect:
                # Restore the clean copy so only one preview rectangle shows
                self.img = self.img2.copy()
                cv2.rectangle(self.img, (self.startX, self.startY), (x, y), (0, 255, 0), 3)
            print('MOUSEMOVE: 鼠标移动')
        print('onmouse')

    def run(self):
        print('run')
        cv2.namedWindow('input')
        cv2.setMouseCallback('input', self.onmouse)
        self.img = cv2.imread('../resource/lena.bmp')
        self.img2 = self.img.copy()  # pristine copy: preview redraws and grabCut input
        self.mask = np.zeros(self.img.shape[:2], dtype=np.uint8)
        self.output = np.zeros(self.img.shape, np.uint8)
        while (1):
            cv2.imshow('input', self.img)
            cv2.imshow('output', self.output)
            key = cv2.waitKey(100)
            if key == 27:  # Esc quits
                break
            if key == ord('g'):
                # BUG FIX: cv2.grabCut raises cv2.error on an empty rectangle,
                # so ignore 'g' until the user has actually drawn one.
                if self.rect[2] > 0 and self.rect[3] > 0:
                    # Internal model buffers required by grabCut; shape is fixed at (1, 65)
                    bgdModel = np.zeros((1, 65), np.float64)
                    fgdModel = np.zeros((1, 65), np.float64)
                    cv2.grabCut(self.img2, self.mask, self.rect,
                                bgdModel, fgdModel, 1, cv2.GC_INIT_WITH_RECT)
                    # Keep sure foreground (GC_FGD == 1) and probable foreground (GC_PR_FGD == 3)
                    mask2 = np.where(((self.mask == 1) | (self.mask == 3)), 255, 0).astype('uint8')
                    self.output = cv2.bitwise_and(self.img2, self.img2, mask=mask2)

App().run()
严格来说,该方法并不是用来对图像分割的,而是在色彩层面的平滑滤波。
它会中和色彩分布相近的颜色,平滑色彩细节,侵蚀掉面积较小的颜色区域。以图像上任一点p为圆心,半径为sp,色彩幅值为sr进行不断的迭代。
pyrMeanShiftFiltering()
用法:
cv2.pyrMeanShiftFiltering(src, sp, sr, dst: None, maxLevel: None, termcrit: None)
参数说明:
代码实现:
import cv2

# Mean-shift pyramid filtering: smooths regions of similar color so that
# Canny + findContours produce clean object outlines.
src = cv2.imread('../resource/flower.png')
# Spatial window radius 20, color window radius 30
smoothed = cv2.pyrMeanShiftFiltering(src, 20, 30)
edges = cv2.Canny(smoothed, 150, 300)
contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# Draw all contours on the original image in red (BGR), thickness 2
cv2.drawContours(src, contours, -1, (0, 0, 255), 2)
cv2.imshow('img', src)
cv2.imshow('img_mean', smoothed)
cv2.imshow('img_canny', edges)
cv2.waitKey(0)
cv2.destroyAllWindows()
原视频:
原理:
MOG去背景: 混合高斯模型为基础的前景/背景分割算法
createBackgroundSubtractorMOG()
用法:
cv2.bgsegm.createBackgroundSubtractorMOG(history: None, nmixtures: None, backgroundRatio: None, noiseSigma: None)
参数说明:
代码实现:
import cv2

# Foreground/background segmentation with the Gaussian-mixture-based
# MOG background subtractor (from the opencv-contrib bgsegm module).
cap = cv2.VideoCapture('../resource/Car.mp4')
mog = cv2.bgsegm.createBackgroundSubtractorMOG()
while (True):
    ret, frame = cap.read()
    # BUG FIX: cap.read() returns (False, None) at end of stream; without
    # this guard mog.apply(None) raises an error when the video finishes.
    if not ret:
        break
    fgmask = mog.apply(frame)
    cv2.imshow('img', fgmask)
    key = cv2.waitKey(10)
    if key == 27:  # Esc quits early
        break
cap.release()
cv2.destroyAllWindows()
同MOG类似,不过对亮度产生的阴影有更好的识别.
createBackgroundSubtractorMOG2()
用法:
cv2.createBackgroundSubtractorMOG2(history: None, varThreshold: None, detectShadows: None)
参数说明:
代码实现:
import cv2

# Foreground/background segmentation with MOG2: like MOG, but better at
# recognizing shadows caused by lighting changes.
cap = cv2.VideoCapture('../resource/Car.mp4')
# Pros: can mark shadow regions
# Cons: produces a lot of noise
mog2 = cv2.createBackgroundSubtractorMOG2()
while (True):
    ret, frame = cap.read()
    # BUG FIX: cap.read() returns (False, None) at end of stream; without
    # this guard mog2.apply(None) raises an error when the video finishes.
    if not ret:
        break
    fgmask = mog2.apply(frame)
    cv2.imshow('img', fgmask)
    key = cv2.waitKey(10)
    if key == 27:  # Esc quits early
        break
cap.release()
cv2.destroyAllWindows()
静态背景图像估计和每个像素的贝叶斯分割,抗噪性更强。
createBackgroundSubtractorGMG()
用法:
cv2.bgsegm.createBackgroundSubtractorGMG(initializationFrames: None, decisionThreshold: None)
参数说明:
代码实现:
import cv2

# Foreground/background segmentation with GMG: static background
# estimation plus per-pixel Bayesian segmentation; more noise-robust.
cap = cv2.VideoCapture('../resource/Car.mp4')
# Pros: can mark shadow regions and reduces noise
# Cons: with default settings nothing is shown for a long warm-up period,
#       so lower the number of initialization frames.
gmg = cv2.bgsegm.createBackgroundSubtractorGMG(initializationFrames=10)
while (True):
    ret, frame = cap.read()
    # BUG FIX: cap.read() returns (False, None) at end of stream; without
    # this guard gmg.apply(None) raises an error when the video finishes.
    if not ret:
        break
    fgmask = gmg.apply(frame)
    cv2.imshow('img', fgmask)
    key = cv2.waitKey(10)
    if key == 27:  # Esc quits early
        break
cap.release()
cv2.destroyAllWindows()
图像修复效果:
inpaint()
用法:
cv2.inpaint(src, inpaintMask, inpaintRadius, flags, dst: None)
参数说明:
代码实现:
import cv2
import numpy as np

# Repair the damaged regions of an image (marked white in the mask)
# using Telea's fast-marching inpainting method.
damaged = cv2.imread('../resource/cvLogo_Ori.png')
damage_mask = cv2.imread('../resource/cvLogo_Mask.png', 0)  # load as grayscale
repaired = cv2.inpaint(damaged, damage_mask, 5, cv2.INPAINT_TELEA)
# Show the original and the repaired result side by side
cv2.imshow('img', np.hstack((damaged, repaired)))
cv2.waitKey(0)
cv2.destroyAllWindows()