OpenCV: the watershed segmentation method

The watershed algorithm is a region-based image segmentation method. During segmentation it uses the similarity between neighboring pixels as its main criterion, connecting pixels that are close in both spatial position and gray value into a closed contour. This closedness of the resulting contours is an important characteristic of the watershed algorithm.
The prototype of the watershed function in OpenCV:

void watershed( InputArray image, InputOutputArray markers );

The first parameter, image, must be an 8-bit, 3-channel color image; there is nothing special about it. The key is the second parameter, markers: before calling watershed, markers must already contain the contours of the different regions, each filled with its own unique label. The contours themselves can be located with OpenCV's findContours function. This preparation is required before the watershed is run.
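A minimal sketch of that preparation (assuming the usual #include <opencv2/opencv.hpp> and the cv/std namespaces used in the full programs below, plus a hypothetical binary image `binary` that already isolates the seed regions):

	// markers must be a CV_32SC1 image; each seed region gets its own positive label
	vector<vector<Point>> contours;
	findContours(binary, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
	Mat markers = Mat::zeros(binary.size(), CV_32SC1);
	for (size_t i = 0; i < contours.size(); i++) {
		drawContours(markers, contours, static_cast<int>(i),
			Scalar::all(static_cast<int>(i) + 1), -1); // filled, label = i + 1
	}
	watershed(src, markers); // src: the 8-bit, 3-channel input image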

So what happens when the watershed runs? Using the contours passed in through markers as seeds (the so-called flooding points), the algorithm decides, according to the watershed rules, which region every other pixel in the image belongs to, until all pixels have been processed. The pixels on the boundary between two regions are set to -1 to mark the separation.
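For example, once watershed has returned, those boundary pixels can be picked out directly (a small sketch, with src and markers as in the sketch above):

	// paint the watershed boundaries (markers == -1) in red on a copy of the input
	Mat boundaries = src.clone();
	for (int row = 0; row < markers.rows; row++) {
		for (int col = 0; col < markers.cols; col++) {
			if (markers.at<int>(row, col) == -1) {
				boundaries.at<Vec3b>(row, col) = Vec3b(0, 0, 255);
			}
		}
	}
	imshow("watershed boundaries", boundaries);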

To summarize, the steps for automatic image segmentation with watershed are:

  1. Convert the image to grayscale, filter it, and run Canny edge detection.

  2. Find the contours and draw each of them, with its own label, onto markers, the second argument of watershed; this is equivalent to marking the flooding seed points.

  3. Run the watershed transform.

  4. Draw the segmented regions. For a nicer display you can fill each region with a random color, or blend the result back with the original image.

#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main(int argc, char** argv) {
	Mat src = imread("D:/b.jpeg");
	if (src.empty()) {
		printf("could not load image...\n");
		return -1;
	}
	namedWindow("input image", WINDOW_AUTOSIZE);
	imshow("input image", src);

	Mat gray, binary, shifted;
	pyrMeanShiftFiltering(src, shifted, 21, 51);
	//imshow("shifted", shifted);

	cvtColor(shifted, gray, COLOR_BGR2GRAY);
	threshold(gray, binary, 0, 255, THRESH_BINARY | THRESH_OTSU);
	//imshow("binary", binary);

	// distance transform
	Mat dist;
	distanceTransform(binary, dist, DistanceTypes::DIST_L2, 3, CV_32F);
	normalize(dist, dist, 0, 1, NORM_MINMAX);
	//imshow("distance result", dist);

	// threshold the normalized distance map to isolate the seed regions
	threshold(dist, dist, 0.4, 1, THRESH_BINARY);
	//imshow("distance binary", dist);

	// markers
	Mat dist_m;
	dist.convertTo(dist_m, CV_8U);
	vector<vector<Point>> contours;
	findContours(dist_m, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE, Point(0, 0));

	// create markers
	Mat markers = Mat::zeros(src.size(), CV_32SC1);
	for (size_t t = 0; t < contours.size(); t++) {
		drawContours(markers, contours, static_cast<int>(t), Scalar::all(static_cast<int>(t) + 1), -1);
	}
	circle(markers, Point(5, 5), 3, Scalar(255), -1); // extra seed near the corner to label the background
	//imshow("markers", markers*10000);

	// morphological erosion on the color image to remove noise and improve the result
	Mat k = getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1));
	morphologyEx(src, src, MORPH_ERODE, k);

	// run the watershed transform
	watershed(src, markers);
	Mat mark = Mat::zeros(markers.size(), CV_8UC1);
	markers.convertTo(mark, CV_8UC1);
	bitwise_not(mark, mark, Mat());
	//imshow("watershed result", mark);

	// generate random color
	vector<Vec3b> colors;
	for (size_t i = 0; i < contours.size(); i++) {
		int r = theRNG().uniform(0, 255);
		int g = theRNG().uniform(0, 255);
		int b = theRNG().uniform(0, 255);
		colors.push_back(Vec3b((uchar)b, (uchar)g, (uchar)r));
	}

	// fill each region with its color for the final display
	Mat dst = Mat::zeros(markers.size(), CV_8UC3);
	int index = 0;
	for (int row = 0; row < markers.rows; row++) {
		for (int col = 0; col < markers.cols; col++) {
			index = markers.at<int>(row, col);
			if (index > 0 && index <= static_cast<int>(contours.size())) {
				dst.at<Vec3b>(row, col) = colors[index - 1];
			}
			else {
				dst.at<Vec3b>(row, col) = Vec3b(0, 0, 0);
			}
		}
	}

	imshow("Final Result", dst);
	printf("number of objects : %d\n", contours.size());

	waitKey(0);
	return 0;
}
A second version of the same approach, refactored into helper functions:

#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

Mat watershedCluster(Mat& image, int& numSegments);
void createDisplaySegments(Mat& segments, int numSegments, Mat& image);
int main(int argc, char** argv) {
	Mat src = imread("D:/b.jpeg");
	if (src.empty()) {
		printf("could not load image...\n");
		return -1;
	}
	namedWindow("input image", WINDOW_AUTOSIZE);
	imshow("input image", src);

	int numSegments;
	Mat markers = watershedCluster(src, numSegments);
	createDisplaySegments(markers, numSegments, src);
	waitKey(0);
	return 0;
}

Mat watershedCluster(Mat& image, int& numComp) {
	// binarize
	Mat gray, binary;
	cvtColor(image, gray, COLOR_BGR2GRAY);
	threshold(gray, binary, 0, 255, THRESH_BINARY | THRESH_OTSU);
	// morphological open + distance transform
	Mat k = getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1));
	morphologyEx(binary, binary, MORPH_OPEN, k, Point(-1, -1));
	Mat dist;
	distanceTransform(binary, dist, DistanceTypes::DIST_L2, 3, CV_32F);
	normalize(dist, dist, 0.0, 1.0, NORM_MINMAX);

	// generate the markers
	threshold(dist, dist, 0.1, 1.0, THRESH_BINARY);
	normalize(dist, dist, 0, 255, NORM_MINMAX);
	dist.convertTo(dist, CV_8UC1);

	// label the seed regions
	vector<vector<Point>> contours;
	vector<Vec4i> hierarchy;
	findContours(dist, contours, hierarchy, RETR_CCOMP, CHAIN_APPROX_SIMPLE);
	if (contours.empty()) {
		return Mat();
	}

	Mat markers(dist.size(), CV_32S);
	markers = Scalar::all(0);
	for (size_t i = 0; i < contours.size(); i++) {
		drawContours(markers, contours, static_cast<int>(i), Scalar(static_cast<int>(i) + 1), -1, 8, hierarchy, INT_MAX);
	}
	circle(markers, Point(5, 5), 3, Scalar(255), -1); // background seed near the corner

	// watershed transform
	watershed(image, markers);
	numComp = static_cast<int>(contours.size());
	return markers;
}

void createDisplaySegments(Mat& markers, int numSegments, Mat& image) {
	// generate random color
	vector<Vec3b> colors;
	for (int i = 0; i < numSegments; i++) {
		int r = theRNG().uniform(0, 255);
		int g = theRNG().uniform(0, 255);
		int b = theRNG().uniform(0, 255);
		colors.push_back(Vec3b((uchar)b, (uchar)g, (uchar)r));
	}

	// fill each region with its color for the final display
	Mat dst = Mat::zeros(markers.size(), CV_8UC3);
	int index = 0;
	for (int row = 0; row < markers.rows; row++) {
		for (int col = 0; col < markers.cols; col++) {
			index = markers.at<int>(row, col);
			if (index > 0 && index <= numSegments) {
				dst.at<Vec3b>(row, col) = colors[index - 1];
			}
			else {
				dst.at<Vec3b>(row, col) = Vec3b(255, 255, 255);
			}
		}
	}
	imshow("分水岭图像分割-演示", dst);
	return;
}

Replacing the image background

The last example clusters the pixel colors with KMeans, treats the cluster of a pixel near the top-left corner as the background, builds a mask from it, softens the mask edge with erosion and Gaussian blur, and then blends a replacement background color in along that edge.

#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

Mat mat_to_samples(Mat& image);
int main(int argc, char** argv) {
	Mat src = imread("D:/test10.jpg");
	if (src.empty()) {
		printf("could not load image...\n");
		return -1;
	}
	namedWindow("input image", WINDOW_AUTOSIZE);
	imshow("input image", src);

	// assemble the sample data: one row per pixel, one column per channel
	Mat points = mat_to_samples(src);

	// run KMeans
	int numCluster = 4;
	Mat labels;
	Mat centers;
	TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 10, 0.1);
	kmeans(points, numCluster, labels, criteria, 3, KMEANS_PP_CENTERS, centers);

	// find the background cluster and build a mask
	Mat mask = Mat::zeros(src.size(), CV_8UC1);
	int index = src.cols * 2 + 2;          // sample index of the pixel at (row 2, col 2)
	int cindex = labels.at<int>(index, 0); // its cluster is treated as the background
	int height = src.rows;
	int width = src.cols;
	//Mat dst;
	//src.copyTo(dst);
	for (int row = 0; row < height; row++) {
		for (int col = 0; col < width; col++) {
			index = row * width + col;
			int label = labels.at<int>(index, 0);
			if (label == cindex) { // background
				//dst.at(row, col)[0] = 0;
				//dst.at(row, col)[1] = 0;
				//dst.at(row, col)[2] = 0;
				mask.at<uchar>(row, col) = 0;
			}
			else {
				mask.at<uchar>(row, col) = 255;
			}
		}
	}
	//imshow("mask", mask);

	// erode and Gaussian-blur the mask to soften its edge
	Mat k = getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1));
	erode(mask, mask, k);
	//imshow("erode-mask", mask);
	GaussianBlur(mask, mask, Size(3, 3), 0, 0);
	//imshow("Blur Mask", mask);

	// blend the new background color with the original image
	RNG rng(12345);
	Vec3b color;
	color[0] = 217;//rng.uniform(0, 255);
	color[1] = 60;// rng.uniform(0, 255);
	color[2] = 160;// rng.uniform(0, 255);
	Mat result(src.size(), src.type());

	double w = 0.0;
	int b = 0, g = 0, r = 0;
	int b1 = 0, g1 = 0, r1 = 0;
	int b2 = 0, g2 = 0, r2 = 0;

	for (int row = 0; row < height; row++) {
		for (int col = 0; col < width; col++) {
			int m = mask.at<uchar>(row, col);
			if (m == 255) {
				result.at<Vec3b>(row, col) = src.at<Vec3b>(row, col); // foreground
			}
			else if (m == 0) {
				result.at<Vec3b>(row, col) = color; // background
			}
			else {
				w = m / 255.0;
				b1 = src.at<Vec3b>(row, col)[0];
				g1 = src.at<Vec3b>(row, col)[1];
				r1 = src.at<Vec3b>(row, col)[2];

				b2 = color[0];
				g2 = color[1];
				r2 = color[2];

				b = b1 * w + b2 * (1.0 - w);
				g = g1 * w + g2 * (1.0 - w);
				r = r1 * w + r2 * (1.0 - w);

				result.at<Vec3b>(row, col)[0] = b;
				result.at<Vec3b>(row, col)[1] = g;
				result.at<Vec3b>(row, col)[2] = r;
			}
		}
	}
	imshow("背景替换", result);

	waitKey(0);
	return 0;
}

Mat mat_to_samples(Mat& image) {
	int w = image.cols;
	int h = image.rows;
	int samplecount = w * h;
	int dims = image.channels();
	Mat points(samplecount, dims, CV_32F, Scalar(10));

	int index = 0;
	for (int row = 0; row < h; row++) {
		for (int col = 0; col < w; col++) {
			index = row * w + col;
			Vec3b bgr = image.at<Vec3b>(row, col);
			points.at<float>(index, 0) = static_cast<float>(bgr[0]);
			points.at<float>(index, 1) = static_cast<float>(bgr[1]);
			points.at<float>(index, 2) = static_cast<float>(bgr[2]);
		}
	}
	return points;
}
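As a side note, the same sample matrix can be built more compactly with Mat::reshape (a sketch, assuming a continuous 8-bit, 3-channel input such as the result of imread):

	// alternative: view the H x W 3-channel image as an N x 3 single-channel matrix
	Mat samples = image.reshape(1, static_cast<int>(image.total()));
	samples.convertTo(samples, CV_32F);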
