The previous post covered the theoretical analysis of Zernike moments and the code for the 7x7 template; the reference for that code is:
高世一, 赵明扬, 张雷, 邹媛媛. 基于Zernike正交矩的图像亚像素边缘检测算法改进[J]. 自动化学报, 2008(09): 1163-1168.
That paper is itself an improvement on the earlier work of Ghosal and Mehrotra:
Ghosal S, Mehrotra R. Orthogonal moment operators for sub-pixel edge detection[J]. Pattern Recognition, 1993, 26(2): 295-306.
Below is Zernike-moment sub-pixel edge detection with a 9x9 template. The 9x9 template comes from:
曲迎东, 李荣德, 白彦华, 李润霞, 马广辉. 高速的9×9尺寸模板Zernike矩边缘算子[J]. 光电子·激光, 2010, 21(11): 1683-1687. DOI: 10.16136/j.joel.2010.11.024.
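As a brief recap of what the code below computes (the full derivation is in the previous post and in the papers cited above): for an N x N template, N = 9 here, each candidate pixel gets an edge angle \theta, a perpendicular distance l from the pixel centre to the edge, and a step height k:

\theta = \arctan\frac{\operatorname{Im} Z_{11}}{\operatorname{Re} Z_{11}}, \qquad
Z_{11}' = \operatorname{Im} Z_{11}\,\sin\theta + \operatorname{Re} Z_{11}\,\cos\theta, \qquad
l = \frac{Z_{20}}{Z_{11}'}, \qquad
k = \frac{3 Z_{11}'}{2\,(1 - l^2)^{3/2}}

A candidate is accepted as an edge point only if k is large enough and |l| \le \sqrt{2}/N (the code uses k \ge 20), and its sub-pixel position is then

(x_s,\; y_s) = (x,\; y) + \frac{N\,l}{2}\,(\cos\theta,\; \sin\theta)

Note that the code below approximates l with arctan2(Z_{20}, Z_{11}'); since Z_{11}' is non-negative this equals arctan(Z_{20}/Z_{11}'), which is close to the plain ratio whenever the ratio is small.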
The Python code for 9x9-template Zernike-moment sub-pixel edge detection is:
import cv2
import numpy as np
import matplotlib.pyplot as plt
g_N = 9  # N: size of the Zernike template (the three kernels below are 9x9)

# 9x9 Zernike moment kernels for Re(Z11), Im(Z11) and Z20 (values from the 2010 paper cited above)
M911R = np.array([0.0000, -0.0012, -0.0109, -0.0095, 0.0000, 0.0095, 0.0109, 0.0012, 0.0000,
                  -0.0016, -0.0254, -0.0220, -0.0110, 0.0000, 0.0110, 0.0220, 0.0254, 0.0016,
                  -0.0215, -0.0329, -0.0219, -0.0110, 0.0000, 0.0110, 0.0219, 0.0329, 0.0215,
                  -0.0380, -0.0329, -0.0219, -0.0110, 0.0000, 0.0110, 0.0219, 0.0329, 0.0380,
                  -0.0434, -0.0329, -0.0219, -0.0110, 0.0000, 0.0110, 0.0219, 0.0329, 0.0434,
                  -0.0380, -0.0329, -0.0219, -0.0110, 0.0000, 0.0110, 0.0219, 0.0329, 0.0380,
                  -0.0215, -0.0329, -0.0219, -0.0110, 0.0000, 0.0110, 0.0219, 0.0329, 0.0215,
                  -0.0016, -0.0254, -0.0220, -0.0110, 0.0000, 0.0110, 0.0220, 0.0254, 0.0016,
                  0.0000, -0.0012, -0.0109, -0.0095, 0.0000, 0.0095, 0.0109, 0.0012, 0.0000]).reshape(9, 9)
M911I = np.array([0.0000, 0.0016, 0.0215, 0.0380, 0.0434, 0.0380, 0.0215, 0.0016, 0.0000,
                  0.0012, 0.0254, 0.0329, 0.0329, 0.0329, 0.0329, 0.0329, 0.0254, 0.0012,
                  0.0109, 0.0219, 0.0219, 0.0219, 0.0219, 0.0219, 0.0219, 0.0219, 0.0109,
                  0.0094, 0.0110, 0.0110, 0.0110, 0.0110, 0.0110, 0.0110, 0.0110, 0.0094,
                  0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
                  -0.0094, -0.0110, -0.0110, -0.0110, -0.0110, -0.0110, -0.0110, -0.0110, -0.0094,
                  -0.0109, -0.0219, -0.0219, -0.0219, -0.0219, -0.0219, -0.0219, -0.0219, -0.0109,
                  -0.0012, -0.0254, -0.0329, -0.0329, -0.0329, -0.0329, -0.0329, -0.0254, -0.0012,
                  -0.0000, -0.0016, -0.0215, -0.0380, -0.0434, -0.0380, -0.0215, -0.0016, -0.0000]).reshape(9, 9)
M920 = np.array([0.0000, 0.0019, 0.0201, 0.0279, 0.0290, 0.0279, 0.0201, 0.0019, 0.0000,
                 0.0019, 0.0275, 0.0148, 0.0002, -0.0048, 0.0002, 0.0148, 0.0275, 0.0019,
                 0.0201, 0.0148, -0.0096, -0.0242, -0.0291, -0.0242, -0.0096, 0.0148, 0.0201,
                 0.0279, 0.0002, -0.0242, -0.0388, -0.0437, -0.0388, -0.0242, 0.0002, 0.0279,
                 0.0290, -0.0047, -0.0291, -0.0437, -0.0486, -0.0437, -0.0291, -0.0047, 0.0290,
                 0.0279, 0.0002, -0.0242, -0.0388, -0.0437, -0.0388, -0.0242, 0.0002, 0.0279,
                 0.0201, 0.0148, -0.0096, -0.0242, -0.0291, -0.0242, -0.0096, 0.0148, 0.0201,
                 0.0019, 0.0275, 0.0148, 0.0002, -0.0048, 0.0002, 0.0148, 0.0275, 0.0019,
                 0.0000, 0.0019, 0.0201, 0.0279, 0.0290, 0.0279, 0.0201, 0.0019, 0.0000]).reshape(9, 9)
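# Optional sanity check (an addition, not from the cited paper): the three kernels must be
# 9x9, and the Z20 kernel should be symmetric under horizontal and vertical flips.
assert M911R.shape == M911I.shape == M920.shape == (9, 9)
assert np.allclose(M920, M920[::-1, :]) and np.allclose(M920, M920[:, ::-1])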
def zernike_9x9(path):
    # read the image and pad it by 9 pixels on every side, so that the 9x9 kernels
    # and the later "-9" coordinate correction stay consistent
    img = cv2.imread(path)
    img = cv2.copyMakeBorder(img, 9, 9, 9, 9, cv2.BORDER_CONSTANT, value=[0, 0, 0])
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # denoise and close small gaps before edge extraction
    img = cv2.medianBlur(img, 5)
    k = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    img_gray1 = cv2.morphologyEx(img, cv2.MORPH_CLOSE, k, iterations=5)

    # optional Otsu binarisation, see the note after the listing
    # t, img_t = cv2.threshold(img_gray1, 0, 255, cv2.THRESH_OTSU)

    # coarse pixel-level edges; the Zernike step below refines them to sub-pixel accuracy
    canny_img = cv2.Canny(img_gray1, 0, 255)

    # convolve the edge map with the three 9x9 Zernike kernels
    ZerImgM911R = cv2.filter2D(canny_img, cv2.CV_64F, M911R)
    ZerImgM911I = cv2.filter2D(canny_img, cv2.CV_64F, M911I)
    ZerImgM920 = cv2.filter2D(canny_img, cv2.CV_64F, M920)

    point_temporary_x = []
    point_temporary_y = []

    # candidate points: every pixel with a non-zero Z20 response
    scatter_arr = cv2.findNonZero(ZerImgM920).reshape(-1, 2)
    for idx in scatter_arr:
        j, i = idx  # findNonZero returns (x, y), i.e. (column, row)

        # edge angle from Z11, then rotate Z11 onto the edge normal
        M9_theta_temporary = np.arctan2(ZerImgM911I[i][j], ZerImgM911R[i][j])
        M9_rotated_z11 = (np.sin(M9_theta_temporary) * ZerImgM911I[i][j]
                          + np.cos(M9_theta_temporary) * ZerImgM911R[i][j])

        # distance l of the edge from the pixel centre (approximates Z20 / Z'11)
        # and step height k of the ideal step-edge model
        l_method3 = np.arctan2(ZerImgM920[i][j], M9_rotated_z11)
        k1 = 3 * M9_rotated_z11 / (2 * (1 - l_method3 ** 2) ** 1.5)
        # background intensity h (needs the Z00 response, which is not computed here):
        # h = (ZerImgM00[i][j] - k1 * np.pi / 2 + k1 * np.arcsin(l_method3)
        #      + k1 * l_method3 * (1 - l_method3 ** 2) ** 0.5) / np.pi

        # keep only genuine edges: strong step and edge close to the pixel centre
        k_value = 20
        l_value = 2 ** 0.5 / g_N
        if k1 >= k_value and np.abs(l_method3) <= l_value:
            # sub-pixel correction along the edge normal; "-9" removes the border padding
            y = i + g_N * l_method3 * np.sin(M9_theta_temporary) / 2 - 9
            x = j + g_N * l_method3 * np.cos(M9_theta_temporary) / 2 - 9
            point_temporary_x.append(x)
            point_temporary_y.append(-y)  # flip y so the plot is not upside-down

    # quick visual check of the detected sub-pixel edge points
    plt.figure()
    plt.plot(point_temporary_x, point_temporary_y)
    plt.show()

    point_temporary_x = np.array(point_temporary_x)
    point_temporary_y = np.array(point_temporary_y)
    return point_temporary_x, point_temporary_y
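A minimal usage sketch (the file name test.png and the CSV output are placeholders, not part of the original code):

if __name__ == '__main__':
    # hypothetical input image, replace with your own picture
    xs, ys = zernike_9x9('test.png')
    print('found %d sub-pixel edge points' % len(xs))
    # the coordinates can then be used for fitting or measurement
    np.savetxt('edge_points.csv', np.column_stack((xs, ys)), delimiter=',', header='x,y')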
An Otsu thresholding step is included above as a commented-out line (just before the Canny call); enable it or not according to your own needs.
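For reference, a sketch of how it could be enabled (an assumption about the intended wiring: the binarised image feeds Canny in place of img_gray1):

    t, img_t = cv2.threshold(img_gray1, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)  # t is the Otsu threshold value
    canny_img = cv2.Canny(img_t, 0, 255)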