于2019/05/29开始学习
import cv2
from matplotlib import pyplot as plt
def test1():
    """Load an image as grayscale and display it with both OpenCV and matplotlib."""
    # Flag 0 -> load as a single-channel grayscale image.
    gray = cv2.imread("E:\\lhcz\\images\\child.jpg", 0)
    cv2.imshow("image_gray", gray)

    # Render the same image with matplotlib using the gray colormap.
    plt.subplot(1, 1, 1), plt.imshow(gray, "gray")
    plt.title("gray_image")
    plt.xticks([]), plt.yticks([])  # hide x/y tick marks
    plt.show()

    if cv2.waitKey(0) == 27:  # Esc closes the OpenCV window
        cv2.destroyAllWindows()


if __name__ == "__main__":
    test1()
可以画直线,矩形,圆形,椭圆,多边形,添加文字等。也可以用鼠标画图。
注意,三通道为:BGR
import cv2
import numpy as np
from matplotlib import pyplot as plt
def test1():
    """Draw a diagonal green line across a black canvas and display it."""
    canvas = np.zeros((512, 512, 3), np.uint8)
    # OpenCV uses BGR channel order, so (0, 255, 0) is green; thickness 5 px.
    cv2.line(canvas, (0, 0), (511, 511), (0, 255, 0), 5)
    cv2.imshow("image", canvas)
    if cv2.waitKey(0) == 27:  # Esc
        cv2.destroyAllWindows()


if __name__ == "__main__":
    test1()
import cv2
def test1():
    """Print basic properties (shape, size, dtype) of a loaded image."""
    img_path = r'E:\lhcz\images\child.jpg'
    image = cv2.imread(img_path)
    dims = image.shape
    print(dims)         # (rows, cols, channels) triple, e.g. (987, 1920, 3)
    print(dims[0])      # number of rows, e.g. 987
    print(image.size)   # total element count = rows * cols * channels
    print(image.dtype)  # pixel data type, typically uint8


if __name__ == "__main__":
    test1()
尽量使用numpy索引
如果你想在图像周边创建一个边,就像相框一样,可以使用cv2.copyMakeBorder()函数。这经常在卷积运算或者0填充时用到。
cv2.add()与cv2.addWeighted()函数
cv2.addWeighted()函数。可以拿一张图片看一下效果
import cv2
import numpy as np
from matplotlib import pyplot as plt
def test3():
    """Show a grayscale image, its morphological opening, and their weighted difference."""
    path1 = r'E:\lhkj\python\opencv3\plate-recognition\image\car4.jpg'
    gray = cv2.imread(path1, 0)
    # A large 20x20 kernel: opening keeps only the coarse background illumination.
    kernel = np.ones((20, 20), np.uint8)
    opened = cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel)
    # gray*1 + opened*(-1): a top-hat-style difference highlighting small bright details.
    diff = cv2.addWeighted(gray, 1, opened, -1, 0)

    panels = zip((gray, opened, diff), ('gray', 'open', 'add'))
    for idx, (img, label) in enumerate(panels, start=1):
        plt.subplot(1, 3, idx), plt.imshow(img, 'gray')
        plt.title(label)
        plt.xticks([]), plt.yticks([])
    plt.show()


if __name__ == "__main__":
    test3()
# 按位运算
# 将阈值图像添加到另外一张图片上
def test4():
    """Paste a thresholded "logo" image onto a background image using bitwise masks."""
    img1_path = r'E:\lhcz\images\nature.jpg'
    img2_path = r'E:\lhcz\images\me.jpg'
    background = cv2.imread(img1_path)  # base image
    logo = cv2.imread(img2_path)        # overlay image
    # shape is (rows, cols, channels); the original bound these to swapped
    # names (cols, rows), which worked but read wrong.
    h, w, _ = logo.shape
    roi = background[0:h, 0:w]
    logo_gray = cv2.cvtColor(logo, cv2.COLOR_BGR2GRAY)
    # Binary threshold: pixels above 120 become 255.
    _, mask = cv2.threshold(logo_gray, 120, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)
    # Keep roi pixels where mask is non-zero; everything else becomes 0.
    img1_bg = cv2.bitwise_and(roi, roi, mask=mask)
    cv2.imshow("img1_bg", img1_bg)
    cv2.imshow("inv", mask_inv)
    img2_fg = cv2.bitwise_and(logo, logo, mask=mask_inv)
    cv2.imshow("img2_fg", img2_fg)
    dst = cv2.add(img1_bg, img2_fg)
    background[0:h, 0:w] = dst
    cv2.imshow('sum', background)
    if cv2.waitKey(0) == 27:
        cv2.destroyAllWindows()
def test30():
    """Isolate pixels of a single hue using an HSV range mask."""
    path = r'E:\lhcz\images\car.jpg'
    image = cv2.imread(path)
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # Hue 60 on OpenCV's 0-179 scale is green (the original variable was
    # misleadingly named lower_blue); only S and V are allowed to vary.
    lower = np.array([60, 155, 155])
    upper = np.array([60, 255, 255])
    # Build the mask from the hue/saturation/value range.
    mask = cv2.inRange(hsv, lower, upper)
    cv2.imshow('mask', mask)
    ret = cv2.bitwise_and(image, image, mask=mask)
    cv2.imshow('ret', ret)
    if cv2.waitKey(0) == 27:
        cv2.destroyAllWindows()
# Geometric transforms: scaling
def test31():
    """Upscale a grayscale image to twice its size with bicubic interpolation."""
    path = r'E:\lhcz\images\car4.jpg'
    image = cv2.imread(path, 0)
    h, w = image.shape
    # cv2.resize takes (width, height); INTER_CUBIC suits enlargement.
    zoomed = cv2.resize(image, (2 * w, 2 * h), interpolation=cv2.INTER_CUBIC)
    cv2.imshow('zoom', zoomed)
    if cv2.waitKey(0) == 27:
        cv2.destroyAllWindows()
# Geometric transforms: translation
def test32():
    """Shift a grayscale image 30 px right and 40 px down via an affine warp."""
    path = r'E:\lhcz\images\car4.jpg'
    image = cv2.imread(path, 0)
    rows, cols = image.shape
    # 2x3 affine matrix [[1, 0, tx], [0, 1, ty]]: pure translation by (30, 40).
    # (The original built this matrix twice; the unused duplicate is removed.)
    m = np.array([[1, 0, 30], [0, 1, 40]], np.float32)
    moved = cv2.warpAffine(image, m, (cols, rows))
    cv2.imshow('move', moved)
    if cv2.waitKey(0) == 27:
        cv2.destroyAllWindows()
# NOTE(review): placeholder note line — cv2.threshold() actually needs (src, thresh, maxval, type).
cv2.threshold()
# 11 is the neighborhood size; 2 is a constant subtracted from the (weighted) mean to form the local threshold.
mask2 = cv2.adaptiveThreshold(image, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)
# Otsu's method chooses the global threshold automatically; 127 is ignored when THRESH_OTSU is set.
ret2, mask2 = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
path = r'E:\lhcz\images\car4.jpg'
image = cv2.imread(path, 0)
# 5x5 averaging kernel: every entry is 1/25.
mask = np.ones((5, 5), np.float32) / 25
# -1 keeps the source image depth.
dst = cv2.filter2D(image, -1, mask)
cv2.imshow('image', image)
cv2.imshow('dst', dst)
# Built-in smoothing equivalents: box filter, Gaussian, median.
dst = cv2.blur(image, (5, 5))
dst = cv2.GaussianBlur(image, (5, 5), 0)
dst = cv2.medianBlur(image, 5)
# 5 = neighborhood diameter; the two 75s are the sigmas of the spatial Gaussian and the intensity-similarity Gaussian.
dst = cv2.bilateralFilter(image, 5, 75, 75)
主要有腐蚀、膨胀、开运算、闭运算等。
会把前景物体的边界腐蚀掉
kernel = np.ones((5, 5), np.uint8)
image_erode = cv2.erode(image, kernel, iterations=1)
dilate()函数可以对输入图像用特定结构元素进行膨胀操作,该结构元素确定膨胀操作过程中的邻域的形状,各点像素值将被替换为对应邻域上的最大值。
kernel = np.ones((5, 5), np.uint8)
image_dilate = cv2.dilate(image, kernel, iterations=1)
先进行腐蚀再进行膨胀就叫做开运算,被用来移除噪声。
kernel = np.ones((5, 5), np.uint8)
image_open = cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel)
先膨胀再腐蚀。经常被用来填充前景物体中的小洞,或者前景物体上的小黑点。
image_close = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel)
一副图像膨胀与腐蚀的差别。结果看上去就像前景物体的轮廓。
kernel = np.ones((5, 5), np.uint8)
image_gradient = cv2.morphologyEx(image, cv2.MORPH_GRADIENT, kernel)
原始图像与进行开运算之后得到的图像的差。
image_tophat = cv2.morphologyEx(image, cv2.MORPH_TOPHAT, kernel)
进行闭运算之后得到的图像与原始图像的差。
image_blackhat = cv2.morphologyEx(image, cv2.MORPH_BLACKHAT, kernel)
image_sobel = cv2.Sobel(image, cv2.CV_64F, 0, 1, ksize=-1)
image_scharr = cv2.Scharr(image, cv2.CV_64F, 0, 1)
image_laplacian = cv2.Laplacian(image, cv2.CV_64F)
image_canny = cv2.Canny(image, 100, 200)
绘制直方图:
def test40():
    """Plot the 256-bin grayscale histogram of an image."""
    path = r'E:\lhcz\images\car4.jpg'
    image = cv2.imread(path, 0)
    # Channel 0, no mask, 256 bins over the range [0, 256).
    hist = cv2.calcHist([image], [0], None, [256], [0, 256])
    plt.plot(hist, 'r')
    plt.xlim([0, 256])
    plt.show()
使用掩模
# Histogram restricted to a region of interest via a mask
def test41():
    """Compute and plot a histogram over a rectangular masked region only."""
    path = r'E:\lhcz\images\car4.jpg'
    image = cv2.imread(path, 0)
    mask = np.zeros(image.shape[:2], np.uint8)
    mask[100:300, 100:400] = 255  # white rectangle selects the region of interest
    masked = cv2.bitwise_and(image, image, mask=mask)
    cv2.imshow('mask_image', masked)
    # Passing the mask makes calcHist count only pixels inside the rectangle.
    hist_mask = cv2.calcHist([image], [0], mask, [256], [0, 256])
    plt.plot(hist_mask, 'r')
    plt.xlim([0, 256])
    plt.show()
    if cv2.waitKey(0) == 27:
        cv2.destroyAllWindows()
np实现:
# Histogram equalization implemented with NumPy
def test43():
    """Equalize a grayscale image by remapping pixels through the normalized CDF."""
    path = r'E:\lhcz\images\car4.jpg'
    image = cv2.imread(path, 0)
    hist, bins = np.histogram(image.flatten(), 256, [0, 256])
    # cumsum() gives the cumulative histogram (CDF).
    cdf = hist.cumsum()
    # Mask out empty bins so min() is taken over populated intensities only.
    cdf_m = np.ma.masked_equal(cdf, 0)
    cdf_m = (cdf_m - cdf_m.min()) * 255 / (cdf_m.max() - cdf_m.min())
    # Fill masked entries with 0 and use the result as a lookup table.
    lut = np.ma.filled(cdf_m, 0).astype('uint8')
    equalized = lut[image]
    cv2.imshow('1', equalized)
    cv2.waitKey(0)
opencv中的直方图均衡化:
# Histogram equalization with OpenCV
def test44():
    """Equalize a grayscale image and show it next to the original."""
    path = r'E:\lhcz\images\me.jpg'
    image = cv2.imread(path, 0)
    equalized = cv2.equalizeHist(image)
    cv2.imshow('equ', equalized)
    # np.hstack tiles horizontally: original and equalized side by side.
    res = np.hstack((image, equalized))
    cv2.imshow('res', res)
    cv2.waitKey(0)
# CLAHE: contrast-limited adaptive histogram equalization
def test45():
    """Apply CLAHE (8x8 tiles, clip limit 2.0) to a grayscale image."""
    path = r'E:\lhcz\images\me.jpg'
    image = cv2.imread(path, 0)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    cv2.imshow('clahe', clahe.apply(image))
    cv2.waitKey(0)
# TODO: finish the histogram-backprojection demo
def test46():
    """Locate regions of an image matching an ROI's H/S color distribution (backprojection)."""
    path = r'E:\lhcz\images\me.jpg'
    roi_path = r'E:\lhcz\images\roi.png'
    # Target image. cv2.imread returns BGR, so convert with COLOR_BGR2HSV —
    # the original used COLOR_RGB2HSV, which computes hue from swapped channels.
    image = cv2.imread(path)
    image_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # Region of interest whose color distribution we search for.
    roi = cv2.imread(roi_path)
    roi_hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    # 2-D hue/saturation histogram of the ROI. (The target-image histogram the
    # original also computed was never used and has been dropped.)
    roi_hist = cv2.calcHist([roi_hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
    # Normalize to 0-255 before backprojection.
    cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
    dst = cv2.calcBackProject([image_hsv], [0, 1], roi_hist, [0, 180, 0, 256], 1)
    # Convolving with an elliptical kernel links scattered response points.
    disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    dst = cv2.filter2D(dst, -1, disc)
    # Threshold the response and expand it to 3 channels to mask the BGR image.
    ret, thresh = cv2.threshold(dst, 20, 255, 0)
    thresh = cv2.merge((thresh, thresh, thresh))
    res = cv2.bitwise_and(image, thresh)
    res = np.hstack((image, thresh, res))
    cv2.imshow('1', res)
    cv2.waitKey(0)
2019/06/01 于儿童节找点乐趣
# Harris corner detection
def test48():
    """Mark Harris corners on an image in red."""
    path = r'E:\lhcz\images\coins.jpg'
    image = cv2.imread(path)
    gray = np.float32(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY))
    # blockSize=2, Sobel aperture=3, Harris free parameter k=0.04.
    response = cv2.cornerHarris(gray, 2, 3, 0.04)
    # Dilate the response map so the corner marks are easier to see.
    response = cv2.dilate(response, None)
    # Everything above 1% of the peak response counts as a corner.
    image[response > 0.01 * response.max()] = [0, 0, 255]
    cv2.imshow('dst', image)
    if cv2.waitKey(0) == 27:
        cv2.destroyAllWindows()
# Sub-pixel accurate corners
def test49():
    """Refine Harris corner centroids to sub-pixel accuracy and draw both sets."""
    path = r'E:\lhcz\images\coins.jpg'
    image = cv2.imread(path)
    image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    image_gray_f = np.float32(image_gray)
    dst = cv2.cornerHarris(image_gray_f, 2, 3, 0.04)
    # Dilate, keep strong responses only, then binarize.
    dst = cv2.dilate(dst, None)
    ret, dst = cv2.threshold(dst, 0.01 * dst.max(), 255, 0)
    dst = np.uint8(dst)
    # Centroids of the connected corner regions.
    ret, labels, stats, centroids = cv2.connectedComponentsWithStats(dst)
    # Stop after 100 iterations or once movement drops below 0.001.
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)
    # Sub-pixel corner refinement in a 5x5 search window.
    corners = cv2.cornerSubPix(image_gray_f, np.float32(centroids), (5, 5), (-1, -1), criteria)
    res = np.hstack((centroids, corners))
    # np.intp replaces np.int0, which was removed in NumPy 2.0 (same dtype).
    res = np.intp(res)
    image[res[:, 1], res[:, 0]] = [0, 0, 255]  # coarse centroids in red
    image[res[:, 3], res[:, 2]] = [0, 255, 0]  # refined corners in green
    cv2.imshow('image', image)
    if cv2.waitKey(0) == 27:
        cv2.destroyAllWindows()
# Shi-Tomasi corner detection
def test50():
    """Draw up to 50 Shi-Tomasi corners as small filled circles."""
    path = r'E:\lhcz\images\coins.jpg'
    image = cv2.imread(path)
    image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Grayscale input, best 50 corners, quality level 0.01,
    # minimum Euclidean distance of 10 px between accepted corners.
    corners = cv2.goodFeaturesToTrack(image_gray, 50, 0.01, 10)
    # np.intp replaces np.int0, which was removed in NumPy 2.0 (same dtype).
    corners = np.intp(corners)
    for corner in corners:
        x, y = corner.ravel()
        cv2.circle(image, (x, y), 3, 255, -1)
    cv2.imshow('image', image)
    if cv2.waitKey(0) == 27:
        cv2.destroyAllWindows()
# SIFT: scale-invariant feature transform
def test51():
    """Detect SIFT keypoints and draw them onto the image."""
    path = r'E:\lhcz\images\coins.jpg'
    image = cv2.imread(path)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # NOTE(review): needs opencv-contrib; newer builds expose cv2.SIFT_create instead.
    sift = cv2.xfeatures2d.SIFT_create()
    keypoints = sift.detect(gray, None)
    cv2.drawKeypoints(gray, keypoints, image)
    cv2.imshow('image', image)
    cv2.waitKey(0)
# SURF: sped-up robust features
def test52():
    """Detect SURF keypoints (Hessian threshold 500) and draw them in red."""
    path = r'E:\lhcz\images\me.jpg'
    image = cv2.imread(path)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # NOTE(review): needs opencv-contrib (xfeatures2d); SURF is a non-free module.
    surf = cv2.xfeatures2d.SURF_create(500)
    keypoints, descriptors = surf.detectAndCompute(gray, None)
    cv2.drawKeypoints(image, keypoints, image, (0, 0, 255), 2)
    cv2.imshow('image', image)
    cv2.waitKey(0)
# FAST corner detector
def test53():
    """Compare FAST keypoints with and without non-max suppression."""
    path = r'E:\lhcz\images\me.jpg'
    image = cv2.imread(path)
    image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Default detector: threshold 10, non-max suppression on.
    fast = cv2.FastFeatureDetector_create()
    kp = fast.detect(image_gray, None)
    image2 = cv2.drawKeypoints(image, kp, None, color=(255, 0, 0))
    cv2.imshow('image2', image2)
    # Bug fix: the original passed cv2.FastFeatureDetector_THRESHOLD (a property-id
    # constant, not a threshold value) as the detection threshold. Use an explicit
    # intensity threshold of 10 (the detector default) with suppression disabled.
    fast1 = cv2.FastFeatureDetector_create(10, nonmaxSuppression=0, type=cv2.FAST_FEATURE_DETECTOR_TYPE_9_16)
    kp1 = fast1.detect(image_gray, None)
    image3 = cv2.drawKeypoints(image, kp1, None, color=(255, 0, 0))
    cv2.imshow('image3', image3)
    cv2.waitKey(0)
# ORB (Oriented FAST and Rotated BRIEF)
def test54():
    """Detect up to 100 ORB keypoints and draw them in blue."""
    path = r'E:\lhcz\images\me.jpg'
    image = cv2.imread(path)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    orb = cv2.ORB_create(100)
    keypoints = orb.detect(gray, None)
    # Compute descriptors for the detected keypoints.
    keypoints, descriptors = orb.compute(gray, keypoints)
    drawn = cv2.drawKeypoints(image, keypoints, image, (255, 0, 0), 0)
    cv2.imshow('image1', drawn)
    cv2.waitKey(0)
使用 OpenCV 中的蛮力(Brute-Force)匹配和 FLANN 匹配 。
# Brute-force matching of ORB descriptors
def test55():
    """Match ORB descriptors between two images and draw the 10 best matches."""
    path1 = r'E:\lhcz\images\me.jpg'
    path2 = r'E:\lhcz\images\test1.jpg'
    image1 = cv2.imread(path1, 0)
    image2 = cv2.imread(path2, 0)
    orb = cv2.ORB_create()
    kp1, des1 = orb.detectAndCompute(image1, None)
    kp2, des2 = orb.detectAndCompute(image2, None)
    # Hamming norm suits binary ORB descriptors; crossCheck=True keeps only mutual matches.
    bf = cv2.BFMatcher_create(cv2.NORM_HAMMING, True)
    matches = sorted(bf.match(des1, des2), key=lambda m: m.distance)
    image3 = cv2.drawMatches(image1, kp1, image2, kp2, matches[:10], None, flags=2)
    plt.imshow(image3), plt.show()
    cv2.waitKey(0)
matches = bf.match(des1,des2) 返回值是一个 DMatch 对象列表。这个DMatch 对象具有下列属性:
# Brute-force matching of SIFT descriptors with Lowe's ratio test
def test56():
    """Keep SIFT matches whose best distance beats 0.75x the second best, then draw them."""
    path1 = r'E:\lhcz\images\me_1.jpg'
    path2 = r'E:\lhcz\images\me_2.jpg'
    image1 = cv2.imread(path1, 0)
    image2 = cv2.imread(path2, 0)
    sift = cv2.xfeatures2d.SIFT_create()
    kp1, des1 = sift.detectAndCompute(image1, None)
    kp2, des2 = sift.detectAndCompute(image2, None)
    bf = cv2.BFMatcher_create()
    # For each descriptor keep the two nearest neighbors.
    matches = bf.knnMatch(des1, des2, 2)
    # Lowe's ratio test rejects ambiguous matches.
    good = [[m] for m, n in matches if m.distance < 0.75 * n.distance]
    image3 = cv2.drawMatchesKnn(image1, kp1, image2, kp2, good, None, flags=2)
    plt.imshow(image3), plt.show()
FLANN 是快速最近邻搜索包(Fast_Library_for_Approximate_Nearest_Neighbors)的简称。它是一个对大数据集和高维特征进行最近邻搜索的算法的集合,而且这些算法都已经被优化过了。在面对大数据集时它的效果要好于 BFMatcher。
使用 FLANN 匹配,我们需要传入两个字典作为参数。这两个用来确定要使用的算法和其他相关参数等。第一个是 IndexParams。各种不同算法的信息可以在 FLANN 文档中找到。这里我们总结一下,对于 SIFT 和 SURF 等,我们可以传入的参数是:
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
但使用 ORB 时,我们要传入的参数如下。注释掉的值是文献中推荐使用的,但是它们并不适合所有情况,其他值的效果可能会更好。
index_params= dict(algorithm = FLANN_INDEX_LSH,
table_number = 6, # 12
key_size = 12, # 20
multi_probe_level = 1) #2
第二个字典是 SearchParams。用它来指定递归遍历的次数。值越高结果越准确,但是消耗的时间也越多。如果你想修改这个值,传入参数:
search_params = dict(checks=50)
联合使用特征提取和 calib3d 模块中的 findHomography 在复杂图像中查找已知对象。