滤波、分割等预处理过程省略。
输入图像为灰度图,激光条纹水平走向。
检测出光条边界 l、h 后,把两边界的中间线(l + h)/2作为激光条纹的中心线。
#include <opencv2/opencv.hpp>
#include <iostream>
// Boundary-midpoint method: for each column, find the top edge l and the
// bottom edge h of the stripe and take (l + h) / 2 as the centre row.
int main(int argc, char** argv)
{
    cv::Mat src_img = cv::imread("70.bmp", 0);
    cv::Mat dst_img = src_img.clone();
    cv::cvtColor(dst_img, dst_img, cv::COLOR_GRAY2RGB);
    uchar *p = src_img.data;
    std::vector<cv::Point> pts;
    for (int i = 0; i < src_img.cols; ++i)
    {
        // -1 means "no lit pixel found in this column" (clearer than the
        // original INT_MIN/INT_MAX sentinels, whose roles were swapped).
        int y_min = -1, y_max = -1;
        // Scan top-down for the upper stripe boundary.
        for (int j = 0; j < src_img.rows; ++j)
        {
            if (*(p + i + src_img.cols * j) != 0)
            {
                y_min = j;
                break;
            }
        }
        // Scan bottom-up for the lower stripe boundary.
        // BUG FIX: the original used `j > 0` and never examined row 0.
        for (int j = src_img.rows - 1; j >= 0; --j)
        {
            if (*(p + i + src_img.cols * j) != 0)
            {
                y_max = j;
                break;
            }
        }
        // BUG FIX: the original required y_min < y_max strictly, silently
        // dropping columns where the stripe is a single pixel thick.
        if (y_min >= 0 && y_max >= y_min)
        {
            pts.push_back(cv::Point(i, (y_min + y_max) / 2));
        }
    }
    for (size_t i = 0; i < pts.size(); ++i)
    {
        // Radius 0 with thickness -1 paints a single pixel (the original
        // passed 0.5, which truncated to 0 anyway).
        cv::circle(dst_img, pts[i], 0, cv::Scalar(0, 0, 255), -1);
    }
    cv::imwrite("70.1.bmp", dst_img);
    system("pause");
    return EXIT_SUCCESS;
}
极值法是将激光条纹横截面上灰度值最大点作为激光条纹的中心。
#include <opencv2/opencv.hpp>
#include <iostream>
// Extremum method: the pixel with the maximum grey value in each column is
// taken as the stripe centre for that column.
int main(int argc, char** argv)
{
    cv::Mat src_img = cv::imread("70.bmp", 0);
    cv::Mat dst_img = src_img.clone();
    cv::cvtColor(dst_img, dst_img, cv::COLOR_GRAY2RGB);
    uchar *p = src_img.data;
    std::vector<cv::Point> pts;
    for (int i = 0; i < src_img.cols; ++i)
    {
        int col_scalar_max = 0;
        int y = -1;  // row of the brightest pixel; -1 = column is all black
        for (int j = 0; j < src_img.rows; ++j)
        {
            int s = *(p + i + src_img.cols * j);
            if (s > col_scalar_max)
            {
                col_scalar_max = s;
                y = j;
            }
        }
        // BUG FIX: the original pushed (i, INT_MIN) unconditionally, so an
        // all-black column produced a garbage point.
        if (y >= 0)
        {
            pts.push_back(cv::Point(i, y));
        }
    }
    for (size_t i = 0; i < pts.size(); ++i)
    {
        // Radius 0 with thickness -1 paints a single pixel.
        cv::circle(dst_img, pts[i], 0, cv::Scalar(0, 0, 255), -1);
    }
    cv::imwrite("70.2.bmp", dst_img);
    system("pause");
    return EXIT_SUCCESS;
}
骨架细化法是重复地剥掉二值图像的边界像素,在剥离的过程中必须保持目标的连通性,直到得到图像的骨架。
具体原理介绍可见Zhang-Suen 图像骨架提取算法的原理和OpenCV实现
#include <opencv2/opencv.hpp>
#include <iostream>
int main(int argc, char** argv)
{
cv::Mat src_img = cv::imread("70.bmp", 0);
cv::Mat dst_img = src_img.clone();
cv::cvtColor(dst_img, dst_img, cv::COLOR_GRAY2RGB);
//zhang细化算法(中轴变换法)
cv::Mat copy_img = src_img.clone();
while (1)
{
bool stop = false;
//step1
for (int i = 1; i < src_img.cols - 1; i++)
for (int j = 0; j < src_img.rows; j++)
{
if (src_img.at<uchar>(j, i)>0)
{
int p1 = int(src_img.at<uchar>(j, i))>0 ? 1 : 0;
int p2 = int(src_img.at<uchar>(j - 1, i))>0 ? 1 : 0;
int p3 = int(src_img.at<uchar>(j - 1, i + 1))>0 ? 1 : 0;
int p4 = int(src_img.at<uchar>(j, i + 1))>0 ? 1 : 0;
int p5 = int(src_img.at<uchar>(j + 1, i + 1))>0 ? 1 : 0;
int p6 = int(src_img.at<uchar>(j + 1, i))>0 ? 1 : 0;
int p7 = int(src_img.at<uchar>(j + 1, i - 1))>0 ? 1 : 0;
int p8 = int(src_img.at<uchar>(j, i - 1))>0 ? 1 : 0;
int p9 = int(src_img.at<uchar>(j - 1, i - 1))>0 ? 1 : 0;
int np1 = p2 + p3 + p4 + p5 + p6 + p7 + p8 + p9;
int sp2 = (p2 == 0 && p3 == 1) ? 1 : 0;
int sp3 = (p3 == 0 && p4 == 1) ? 1 : 0;
int sp4 = (p4 == 0 && p5 == 1) ? 1 : 0;
int sp5 = (p5 == 0 && p6 == 1) ? 1 : 0;
int sp6 = (p6 == 0 && p7 == 1) ? 1 : 0;
int sp7 = (p7 == 0 && p8 == 1) ? 1 : 0;
int sp8 = (p8 == 0 && p9 == 1) ? 1 : 0;
int sp9 = (p9 == 0 && p2 == 1) ? 1 : 0;
int sp1 = sp2 + sp3 + sp4 + sp5 + sp6 + sp7 + sp8 + sp9;
if (np1 >= 2 && np1 <= 6 && sp1 == 1 && ((p2*p4*p6) == 0) && ((p4*p6*p8) == 0))
{
stop = true;
copy_img.at<uchar>(j, i) = 0;
}
}
}
//step2
for (int i = 1; i < copy_img.cols - 1; i++)
{
for (int j = 0; j < src_img.rows; j++)
{
if (src_img.at<uchar>(j, i)>0)
{
int p2 = int(src_img.at<uchar>(j - 1, i))>0 ? 1 : 0;
int p3 = int(src_img.at<uchar>(j - 1, i + 1)) > 0 ? 1 : 0;
int p4 = int(src_img.at<uchar>(j, i + 1)) > 0 ? 1 : 0;
int p5 = int(src_img.at<uchar>(j + 1, i + 1)) > 0 ? 1 : 0;
int p6 = int(src_img.at<uchar>(j + 1, i)) > 0 ? 1 : 0;
int p7 = int(src_img.at<uchar>(j + 1, i - 1)) > 0 ? 1 : 0;
int p8 = int(src_img.at<uchar>(j, i - 1)) > 0 ? 1 : 0;
int p9 = int(src_img.at<uchar>(j - 1, i - 1)) > 0 ? 1 : 0;
int np1 = p2 + p3 + p4 + p5 + p6 + p7 + p8 + p9;
int sp2 = (p2 == 0 && p3 == 1) ? 1 : 0;
int sp3 = (p3 == 0 && p4 == 1) ? 1 : 0;
int sp4 = (p4 == 0 && p5 == 1) ? 1 : 0;
int sp5 = (p5 == 0 && p6 == 1) ? 1 : 0;
int sp6 = (p6 == 0 && p7 == 1) ? 1 : 0;
int sp7 = (p7 == 0 && p8 == 1) ? 1 : 0;
int sp8 = (p8 == 0 && p9 == 1) ? 1 : 0;
int sp9 = (p9 == 0 && p2 == 1) ? 1 : 0;
int sp1 = sp2 + sp3 + sp4 + sp5 + sp6 + sp7 + sp8 + sp9;
if (np1 >= 2 && np1 <= 6 && sp1 == 1 && (p2*p4*p8) == 0 && (p2*p6*p8) == 0)
{
stop = true;
copy_img.at<uchar>(j, i) = 0;
}
}
}
}
copy_img.copyTo(src_img);
if (!stop)
{
break;
}
}
uchar *p = copy_img.data;
std::vector<cv::Point> pts;
for (int i = 0; i < copy_img.cols; i++)
for (int j = 0; j < copy_img.rows; j++)
if (*(p + i + src_img.cols * j) != 0)
pts.push_back(cv::Point(i, j));
for (size_t i = 0; i < pts.size(); ++i)
{
cv::circle(dst_img, cv::Point(round(pts[i].x), round(pts[i].y)), 0.5, cv::Scalar(0, 0, 255), -1);
}
cv::imwrite("70.3.bmp", dst_img);
system("pause");
return EXIT_SUCCESS;
}
灰度重心法就是对图像中的每一列(行)提取灰度重心作为激光条纹的中心位置。若某行的非零区间为 $[p, q]$,则该行的灰度重心位置为:

$$\bar{y} = \frac{\sum_{i=p}^{q} i \cdot I_i}{\sum_{i=p}^{q} I_i}$$

式中,$I_i$ 是第 $i$ 个像素点的灰度值。
#include <opencv2/opencv.hpp>
#include <iostream>
int main(int argc, char** argv)
{
cv::Mat src_img = cv::imread("70.bmp", 0);
cv::Mat dst_img = src_img.clone();
cv::cvtColor(dst_img, dst_img, cv::COLOR_GRAY2RGB);
uchar *p = src_img.data;
std::vector<cv::Point2f> pts;
for (size_t i = 0; i < src_img.cols; ++i)
{
int sum = 0;
float y = 0;
for (size_t j = 0; j < src_img.rows; ++j)
{
int s = *(p + i + src_img.cols * j);
if (s)
{
sum += s;
y += j*s;
}
}
if (sum)
{
y /= sum;
pts.push_back(cv::Point2f(i, y));
}
}
for (size_t i = 0; i < pts.size(); ++i)
{
cv::circle(dst_img, cv::Point(round(pts[i].x), round(pts[i].y)), 0.5, cv::Scalar(0, 0, 255), -1);
}
cv::imwrite("70.4.bmp", dst_img);
system("pause");
return EXIT_SUCCESS;
}
#include <opencv2/opencv.hpp>
#include <iostream>
//亚像素灰度值计算
// Bilinear interpolation of the grey value at sub-pixel position (x, y).
// Assumes 0 <= x < img.cols and 0 <= y < img.rows (callers range-check this).
float ijpixel(float x, float y, cv::Mat& img)
{
    int x_0 = int(x);
    int y_0 = int(y);
    // BUG FIX: the original sampled int(x + 1) / int(y + 1) unclamped, so a
    // point in the last column or row read one pixel past the image bounds.
    int x_1 = std::min(x_0 + 1, img.cols - 1);
    int y_1 = std::min(y_0 + 1, img.rows - 1);
    int px_0y_0 = int(img.at<uchar>(y_0, x_0));
    int px_0y_1 = int(img.at<uchar>(y_1, x_0));
    int px_1y_0 = int(img.at<uchar>(y_0, x_1));
    int px_1y_1 = int(img.at<uchar>(y_1, x_1));
    // Interpolate along x on both rows, then along y between the two rows.
    float x_y0 = px_0y_0 + (x - float(x_0)) * (px_1y_0 - px_0y_0);
    float x_y1 = px_0y_1 + (x - float(x_0)) * (px_1y_1 - px_0y_1);
    return x_y0 + (y - float(y_0)) * (x_y1 - x_y0);
}
int main(int argc, char** argv)
{
cv::Mat src_img = cv::imread("70.bmp", 0);
cv::Mat dst_img = src_img.clone();
cv::cvtColor(dst_img, dst_img, cv::COLOR_GRAY2RGB);
uchar *p = src_img.data;
std::vector<cv::Point2f> pts;
//灰度重心法//
for (size_t i = 0; i < src_img.cols; ++i)
{
int sum = 0;
float y = 0;
for (size_t j = 0; j < src_img.rows; ++j)
{
int s = *(p + i + src_img.cols * j);
if (s)
{
sum += s;
y += j*s;
}
}
if (sum)
{
y /= sum;
pts.push_back(cv::Point2f(i, y));
}
}
//法向迭代质心法//
int iter = 0;
int max_iter = 10;
float distance = 0;
float thre_distance = 0.05;
std::vector<cv::Point2f> pts_new = pts;
do
{
int pts_tmp_size = 5;
assert((pts_tmp_size - 1) % 2 == 0);
for (size_t i = (pts_tmp_size - 1) % 2; i < pts.size() - (pts_tmp_size - 1) % 2; ++i)
{
std::vector<cv::Point2f> pts_tmp;
for (size_t j = 0; j < pts_tmp_size; ++j)
{
pts_tmp.push_back(pts[i + j - (pts_tmp_size - 1) % 2]);
}
cv::Vec4f line_para;
cv::fitLine(pts_tmp, line_para, cv::DIST_L2, 0, 1e-2, 1e-2); //最小二乘拟合直线方程系数
float k = line_para[1] / line_para[0];
float sin_theta = 1 / sqrt(k * k + 1);
float cos_theta = - k / sqrt(k * k + 1);
float sum = ijpixel(pts[i].x, pts[i].y, src_img);
float sumx = ijpixel(pts[i].x, pts[i].y, src_img)* pts[i].x;
float sumy = ijpixel(pts[i].x, pts[i].y, src_img)* pts[i].y;
int range = 10;
for (size_t j = 1; j < range; ++j)
{
float x_cor_left = pts[i].x - j*cos_theta;
float y_cor_left = pts[i].y - j*sin_theta;
float x_cor_right = pts[i].x + j*cos_theta;
float y_cor_right = pts[i].y + j*sin_theta;
if (x_cor_left >= 0 && x_cor_left<src_img.cols && y_cor_left >= 0 && y_cor_left<src_img.rows &&
x_cor_right >= 0 && x_cor_right<src_img.cols && y_cor_right >= 0 && y_cor_right< src_img.rows)
{
if (ijpixel(x_cor_left, y_cor_left, src_img) || ijpixel(x_cor_right, y_cor_right, src_img))
{
sum += ijpixel(x_cor_left, y_cor_left, src_img) + ijpixel(x_cor_right, y_cor_right, src_img);
sumx += ijpixel(x_cor_left, y_cor_left, src_img)*x_cor_left + ijpixel(x_cor_right, y_cor_right, src_img)*x_cor_right;
sumy += ijpixel(x_cor_left, y_cor_left, src_img)*y_cor_left + ijpixel(x_cor_right, y_cor_right, src_img)*y_cor_right;
}
}
}
pts_new[i].x = (float)sumx / sum;
pts_new[i].y = (float)sumy / sum;
}
distance = 0;
for (int i = 0; i < pts.size(); i++)
{
distance += pow(pts_new[i].x - pts[i].x, 2) + pow(pts_new[i].y - pts[i].y, 2);
}
distance = sqrt(distance/ pts.size());
iter++;
std::cout << iter << "\t" << distance << std::endl;
pts = pts_new;
} while (iter < max_iter && distance > thre_distance);
for (size_t i = 0; i < pts.size(); ++i)
{
cv::circle(dst_img, cv::Point(round(pts[i].x), round(pts[i].y)), 0.5, cv::Scalar(0, 0, 255), -1);
}
cv::imwrite("70.5.bmp", dst_img);
system("pause");
return EXIT_SUCCESS;
}
#include <opencv2/opencv.hpp>
#include <iostream>
int main(int argc, char** argv)
{
cv::Mat src_img = cv::imread("14.bmp", 0);
cv::Mat dst_img = src_img.clone();
cv::cvtColor(dst_img, dst_img, cv::COLOR_GRAY2RGB);
std::vector<cv::Point2f> pts;
//src_img.convertTo(src_img, CV_32FC1);
cv::GaussianBlur(src_img, src_img, cv::Size(0, 0), 6, 6);
//一阶偏导数
cv::Mat m1, m2;
m1 = (cv::Mat_<float>(1, 3) << 1, 0, -1); //x偏导
m2 = (cv::Mat_<float>(3, 1) << 1, 0, -1); //y偏导
cv::Mat dx, dy;
cv::filter2D(src_img, dx, CV_32FC1, m1); //卷积
cv::filter2D(src_img, dy, CV_32FC1, m2);
//二阶偏导数
cv::Mat m3, m4, m5;
m3 = (cv::Mat_<float>(1, 3) << 1, -2, 1); //二阶x偏导
m4 = (cv::Mat_<float>(3, 1) << 1, -2, 1); //二阶y偏导
m5 = (cv::Mat_<float>(2, 2) << 1, -1, -1, 1); //二阶xy偏导
cv::Mat dxx, dyy, dxy;
cv::filter2D(src_img, dxx, CV_32FC1, m3);
cv::filter2D(src_img, dyy, CV_32FC1, m4);
cv::filter2D(src_img, dxy, CV_32FC1, m5);
//hessian矩阵
for (size_t i = 0; i < src_img.cols; ++i)
{
for (size_t j = 0; j < src_img.rows; ++j)
{
if (src_img.at<uchar>(j, i) > 50)
{
cv::Mat hessian(2, 2, CV_32FC1);
hessian.at<float>(0, 0) = dxx.at<float>(j, i);
hessian.at<float>(0, 1) = dxy.at<float>(j, i);
hessian.at<float>(1, 0) = dxy.at<float>(j, i);
hessian.at<float>(1, 1) = dyy.at<float>(j, i);
cv::Mat eValue, eVectors;
cv::eigen(hessian, eValue, eVectors); //求特征值与特征向量
double nx, ny;
if (fabs(eValue.at<float>(0, 0)) >= fabs(eValue.at<float>(1, 0))) //求特征值最大时对应的特征向量
{
nx = eVectors.at<float>(0, 0);
ny = eVectors.at<float>(0, 1);
}
else
{
nx = eVectors.at<float>(1, 0);
ny = eVectors.at<float>(1, 1);
}
double t = -(nx*dx.at<float>(j, i) + ny*dy.at<float>(j, i)) /
(nx*nx*dxx.at<float>(j, i) + 2 * nx*ny*dxy.at<float>(j, i) + ny*ny*dyy.at<float>(j, i));
if (fabs(t*nx) <= 0.5 && fabs(t*ny) <= 0.5)
{
pts.push_back(cv::Point2f(i + t*nx, j + t*ny));
}
}
}
}
for (size_t i = 0; i < pts.size(); ++i)
{
cv::circle(dst_img, cv::Point(round(pts[i].x), round(pts[i].y)), 0.5, cv::Scalar(0, 0, 255), -1);
}
cv::imwrite("70.6.bmp", dst_img);
system("pause");
return EXIT_SUCCESS;
}
后续文章:中心线提取–GPU加速