// Load the test frame from disk; imread defaults to 3-channel BGR.
Mat image = imread("img/whiteCarLaneSwitch.jpg");
imread函数
Mat cv::imread ( const String & filename,
int flags = IMREAD_COLOR
)
参数1:filename—需要载入图片的路径名。主要有绝对路径法、相对路径法、命令行参数法等,具体可参考使用imread()函数读取图片的六种正确姿势。
参数2:flags—加载图像的颜色类型(cv::ImreadModes)。默认是3通道BGR彩色图像。
// Grayscale conversion: lane detection only needs intensity, not color.
Mat gray_image;
cv::cvtColor(image, gray_image, cv::COLOR_BGR2GRAY);
cvtColor函数
void cv::cvtColor(cv::InputArray src,
cv::OutputArray dst,
int code,
int dstCn = 0)
参数1:src—输入图像即要进行颜色空间变换的原图像。
参数2:dst—输出图像即进行颜色空间变换后存储图像。
参数3:code—转换的代码或标识,即在此确定将什么制式的图片转换成什么制式的图片。cv::ColorConversionCodes
参数4:dstCn—目标图像通道数,如果取值为0,则由src和code决定。
// Smooth the grayscale frame with a 3x3 Gaussian kernel to suppress
// noise before edge detection (sigma derived from the kernel size).
// An edge-preserving bilateral filter was tried as an alternative:
//   bilateralFilter(gray_image, blur_image, 13, 15, 15);
Mat blur_image;
cv::GaussianBlur(gray_image, blur_image, cv::Size(3, 3), 0.0, 0.0);
GaussianBlur函数
void cv::GaussianBlur(cv::InputArray src,
cv::OutputArray dst,
cv::Size ksize,
double sigmaX,
double sigmaY = (0.0),
int borderType = 4)
参数1:src—输入图像;图像可以具有任意数量的通道,这些通道是独立处理的。
参数2:dst—输出图像;与src具有相同大小和类型。
参数3:ksize—高斯核大小;ksize.width和ksize.height可以不同,但它们都必须为正奇数。 或者,它们可以为零,然后根据sigma计算得出。
参数4:sigmaX—高斯核在x方向的标准差。
参数5:sigmaY—高斯核在y方向的标准差。sigmaY=0时,其值自动由sigmaX确定(sigmaY=sigmaX);sigmaY=sigmaX=0时,它们的值将由ksize.width和ksize.height自动确定。
参数6:borderType—像素外插策略。
// Canny edge detection with a 1:3 low/high hysteresis threshold ratio
// and the default 3x3 Sobel aperture.
Mat canny_image;
cv::Canny(blur_image, canny_image, 50.0, 150.0, 3);
Canny函数
void cv::Canny(cv::InputArray image,
cv::OutputArray edges,
double threshold1,
double threshold2,
int apertureSize = 3,
bool L2gradient = false)
参数1:image—输入图像;8位。
参数2:edges—输出边缘;单通道8位图像,其大小与image相同。
参数3:threshold1—阈值1
参数4:threshold2—阈值2
参数5:apertureSize—Sobel算子大小。
参数6:L2gradient—是否采用更精确的方式计算图像梯度。
关于阈值:threshold1和threshold2中较小者用于滞后连接弱边缘,较大者用于确定强边缘的起始点;通常建议高低阈值比保持在2:1到3:1之间(如本例中的150:50)。
// Region of interest: mask srcImg down to a trapezoid covering the road
// ahead — a narrow top edge at 60% of the frame height widening to the
// full bottom edge. Pixels outside the trapezoid are zeroed.
//
// srcImg  - input image (e.g. a Canny edge map); not modified
// returns a copy of srcImg with everything outside the ROI blacked out
Mat region_of_interest(Mat& srcImg)
{
	Mat dstImg;
	// Single-channel 8-bit mask, as required by Mat::copyTo.
	Mat mask = Mat::zeros(srcImg.size(), CV_8UC1);
	int img_width = srcImg.size().width;
	int img_height = srcImg.size().height;
	// Trapezoid vertices, clockwise from the top-left corner.
	// NOTE: the original listed both bottom corners twice; duplicate
	// vertices are redundant for a filled contour and were dropped.
	vector<Point> pts;
	pts.push_back(Point(img_width * 0.45, img_height * 0.6));
	pts.push_back(Point(img_width * 0.55, img_height * 0.6));
	pts.push_back(Point(img_width, img_height));
	pts.push_back(Point(0, img_height));
	vector<vector<Point>> contour;
	contour.push_back(pts);
	// thickness = -1 fills the polygon interior with white (255).
	drawContours(mask, contour, 0, Scalar::all(255), -1);
	// Masked copy: destination pixels outside the trapezoid stay 0.
	srcImg.copyTo(dstImg, mask);
	return dstImg;
}
// Mask the edge map down to the road-shaped region of interest.
Mat ROI_image = region_of_interest(canny_image);
// --- Probabilistic Hough transform ---
// Convert the single-channel edge mask back to BGR so the detected
// segments can be drawn in color on top of it.
Mat Hough_image;
cv::cvtColor(ROI_image, Hough_image, COLOR_GRAY2BGR);
vector<Vec4f> plines;        // one {x1, y1, x2, y2} entry per detected segment
double rho = 1;              // distance resolution: 1 pixel
double theta = CV_PI / 180;  // angle resolution: 1 degree
int threshold = 15;          // minimum accumulator votes for a candidate line
double minLineLength = 40;   // discard segments shorter than this
double maxLineGap = 20;      // bridge gaps up to this length between collinear segments
cv::HoughLinesP(ROI_image, plines, rho, theta, threshold, minLineLength, maxLineGap);
// Overlay every raw segment in cyan for visual inspection.
for (const Vec4f& seg : plines)
{
	cv::line(Hough_image, Point(seg[0], seg[1]), Point(seg[2], seg[3]),
		Scalar(255, 255, 0), 1, 8);
}
// Fit one straight line per lane side from the raw Hough segments and
// draw both onto a copy of the input frame (left lane red, right blue).
//
// Each segment is classified by slope k = dy/dx:
//   right lane:  0.3 < k <  2.0
//   left  lane: -2.0 < k < -0.3
// Near-horizontal segments fall outside both bands and are ignored as
// noise; a perfectly vertical segment yields k = +/-inf (float division)
// and is likewise rejected. A robust Huber fit (cv::fitLine) is run on
// each side's endpoints, and the fitted lines are drawn between the
// extreme y values observed across all segments.
//
// Fixes vs. the original: the two classification branches were an exact
// copy-paste differing only in endpoint order (deduplicated below), and
// cv::fitLine was called unconditionally, crashing whenever one side
// collected no points (now guarded). `lines` is taken by const reference
// to avoid copying the vector; call sites are unchanged.
//
// img   - frame to draw on; not modified (a clone is returned)
// lines - segments from HoughLinesP, each {x1, y1, x2, y2}
Mat draw_lines(Mat& img, const vector<Vec4f>& lines)
{
	Mat line_image = img.clone();
	vector<Point> PointsL, PointsR;
	int y_min = img.size().height;
	int y_max = 0;
	for (size_t i = 0; i < lines.size(); i++)
	{
		const Vec4f& seg = lines[i];
		// Track the vertical extent covered by ANY segment (matching the
		// original, which updated y_min/y_max before the slope check),
		// independent of which endpoint was reported first.
		float seg_ymin = seg[1] < seg[3] ? seg[1] : seg[3];
		float seg_ymax = seg[1] < seg[3] ? seg[3] : seg[1];
		if (seg_ymin < y_min) y_min = static_cast<int>(seg_ymin);
		if (seg_ymax > y_max) y_max = static_cast<int>(seg_ymax);
		double k = (seg[3] - seg[1]) / (seg[2] - seg[0]);
		if (k > 0.3 && k < 2.0)
		{
			PointsR.push_back(Point(seg[0], seg[1]));
			PointsR.push_back(Point(seg[2], seg[3]));
		}
		else if (k < -0.3 && k > -2.0)
		{
			PointsL.push_back(Point(seg[0], seg[1]));
			PointsL.push_back(Point(seg[2], seg[3]));
		}
	}
	// Fit and draw each side only when it has points: cv::fitLine
	// asserts on an empty point set.
	if (!PointsL.empty())
	{
		cv::Vec4f lineL;
		cv::fitLine(PointsL, lineL, DIST_HUBER, 0, 1e-2, 1e-2);
		// fitLine returns (vx, vy, x0, y0): a unit direction vector and
		// a point on the line; convert to slope/intercept form.
		double k1 = lineL[1] / lineL[0];
		double b1 = lineL[3] - k1 * lineL[2];
		cv::line(line_image, Point((y_min - b1) / k1, y_min),
			Point((y_max - b1) / k1, y_max), Scalar(0, 0, 255), 2, LINE_AA);
	}
	if (!PointsR.empty())
	{
		cv::Vec4f lineR;
		cv::fitLine(PointsR, lineR, DIST_HUBER, 0, 1e-2, 1e-2);
		double k2 = lineR[1] / lineR[0];
		double b2 = lineR[3] - k2 * lineR[2];
		cv::line(line_image, Point((y_min - b2) / k2, y_min),
			Point((y_max - b2) / k2, y_max), Scalar(255, 0, 0), 2, LINE_AA);
	}
	//cv::imshow("line_image", line_image);
	return line_image;
}
// Overlay the two fitted lane lines on the original color frame.
Mat lane_image = draw_lines(image, plines);
udacity/CarND-LaneLines-P1
无人驾驶技术入门(十四)| 初识图像之初级车道线检测
OpenCV-4.5.0 Docs