I've recently been working on autonomous driving in a Unity simulation, specifically the lane detection part. I went through a lot of tutorials online, but most of them either didn't work or didn't fit my use case, so I ended up writing my own (drawing on plenty of prior work). The full code is pasted at the bottom as a reference for myself. Without further ado, some pictures first.
First, the original image.
Then the result.
Unity screenshot.
Unity result screenshot.
It looks complicated, but the overall recipe is always the same; the painful part is tuning the parameters.
I wrote this for single images; for video you need to split it into frames and process them one by one, the principle below is the same.
1. Gaussian denoising first: GaussianBlur()
2. Then grayscale, binarize, and edge detection: cvtColor -> threshold -> filter2D
For edge detection I recommend a simple convolution kernel; other methods work too, but the results didn't look as good to me.
3. Cut out the region of interest: fill a polygon mask, then AND it with the edge image. fillConvexPoly -> bitwise_and
4. The key step: Hough line detection with HoughLinesP.
This is the probabilistic Hough transform. HoughLinesP analyzes only a subset of the points and estimates whether they belong to the same line segment. Put simply, some lane markings are dashed, so plain HoughLines is not recommended here (see the short comparison sketch after this list).
5. Split all detected segments into left and right lines: first filter the segments by slope, then classify them as left or right by their x position.
6. Fit one line per side from that information with fitLine, giving the four end points.
7. Determine the driving direction from the slopes of the two lines (in the code this is done via their vanishing point).
8. Draw the lines on the image.
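To make the point in step 4 concrete, here is a small self-contained sketch (not part of the project code below) contrasting the two Hough variants. compareHough and its parameter values are illustrative assumptions on my part; the values actually used in this project are the ones in houghLines() further down.
#include <opencv2/opencv.hpp>
#include <vector>
// Minimal sketch: standard vs. probabilistic Hough transform on a binary edge image.
// "edges" is assumed to be the masked edge image produced in step 3.
void compareHough(const cv::Mat& edges) {
    // HoughLines returns infinite lines as (rho, theta) pairs: it cannot tell
    // where a dashed lane marking starts or ends.
    std::vector<cv::Vec2f> full_lines;
    cv::HoughLines(edges, full_lines, 1, CV_PI / 180, 100);
    // HoughLinesP returns finite segments (x1, y1, x2, y2), so each dash of a
    // dashed marking comes back as its own segment and can be fitted later.
    std::vector<cv::Vec4i> segments;
    cv::HoughLinesP(edges, segments, 1, CV_PI / 180, 20, 20, 30);
}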
On the Unity simulation side, the idea is simply to save the image captured by the camera and then call the C++ DLL.
First, add a camera and give it a render texture.
Then write that texture out to a local file.
The code is further down.
One more important point: my approach writes the Unity image to disk (external storage), OpenCV then reads the file and processes it, and if you want to show the result back in the Unity UI, OpenCV has to write the image to disk again for Unity to load. The disk-vs-memory speed gap alone means this whole round trip makes the program extremely, extremely laggy. There are currently two ways to solve this.
Posting the link again: https://blog.csdn.net/qq_25490573/article/details/99671398
The first is to have Unity keep the camera image in memory and let the OpenCV DLL operate on that memory directly.
This brings a pile of problems of its own. To map types between the two languages I used byte on the C# side and unsigned char on the C++ side. First, both sides have to produce the image in the same format: for example, if OpenCV decodes a three-channel image, Unity needs to output RGB24. In practice the decoding kept failing and Unity would crash without warning. On top of that, Unity's image origin is the bottom-left corner while OpenCV's is the top-left, which means the existing Encode... methods don't apply directly.
How I solved all this... well, the project got cancelled by management, so I'll fill that in if I ever get the chance (a rough, untested sketch of the idea follows after the next paragraph).
The other approach is to use the OpenCVForUnity plugin. Under the hood it does essentially the same thing as above, except it's already wrapped up for you, so there's nothing extra to implement.
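For reference, a minimal sketch of what the DLL side of that first approach (passing the raw pixels through memory) might look like. This is not what the project shipped; the entry point lanedet_mem, the RGB24 layout and the vertical flip are my own assumptions and untested.
#include <opencv2/opencv.hpp>
// Hypothetical DLL entry point: Unity would pass the raw RGB24 pixel buffer of the
// render texture instead of writing a PNG to disk.
extern "C" __declspec(dllexport) int lanedet_mem(unsigned char* pixels, int width, int height)
{
    if (pixels == nullptr || width <= 0 || height <= 0)
        return 0;
    // Wrap the Unity buffer without copying; assumes tightly packed RGB24 rows.
    cv::Mat rgb(height, width, CV_8UC3, pixels);
    cv::Mat bgr;
    cv::cvtColor(rgb, bgr, cv::COLOR_RGB2BGR); // Unity hands over RGB, OpenCV works in BGR
    cv::flip(bgr, bgr, 0);                     // Unity's origin is bottom-left, OpenCV's is top-left
    // ... run the same deNoise / edgeDetector / mask / houghLines pipeline on bgr ...
    return 1;
}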
This header file is required:
#include <opencv2/opencv.hpp>
#include <string>
#include <vector>
class LaneDetector
{
private:
double img_size;
double img_center;
bool left_flag = false;
bool right_flag = false;
cv::Point right_b;
double right_m; // y = m*x + b
cv::Point left_b;
double left_m;
public:
cv::Mat deNoise(cv::Mat inputImage); // Apply Gaussian blurring to the input Image
cv::Mat edgeDetector(cv::Mat img_noise); // Filter the image to obtain only edges
cv::Mat mask(cv::Mat img_edges); // Mask the edges image to only care about ROI
std::vector<cv::Vec4i> houghLines(cv::Mat img_mask); // Detect Hough lines in masked edges image
std::vector<std::vector<cv::Vec4i> > lineSeparation(std::vector<cv::Vec4i> lines, cv::Mat img_edges); // Sort detected lines by their slope into right and left lines
std::vector<cv::Point> regression(std::vector<std::vector<cv::Vec4i> > left_right_lines, cv::Mat inputImage); // Get only one line for each side of the lane
std::string predictTurn(); // Determine if the lane is turning or not by calculating the position of the vanishing point
int plotLane(cv::Mat inputImage, std::vector<cv::Point> lane, std::string turn); // Plot the resultant lane and turn prediction in the frame.
};
//extern "C" __declspec(dllexport) int lanedet();
This cpp file is also required:
#include "stdafx.h"
#include <opencv2/opencv.hpp>
#include <string>
#include <vector>
#include "LaneDetector.h"
// IMAGE BLURRING
/**
*@brief Apply gaussian filter to the input image to denoise it
*@param inputImage is the frame of a video in which the lane is going to be detected
*@return Blurred and denoised image
*/
cv::Mat LaneDetector::deNoise(cv::Mat inputImage) {
cv::Mat output;
cv::GaussianBlur(inputImage, output, cv::Size(3, 3), 0, 0);
return output;
}
// EDGE DETECTION
/**
*@brief Detect all the edges in the blurred frame by filtering the image
*@param img_noise is the previously blurred frame
*@return Binary image with only the edges represented in white
*/
cv::Mat LaneDetector::edgeDetector(cv::Mat img_noise) {
cv::Mat output;
cv::Mat kernel;
cv::Point anchor;
// Convert image from RGB to gray
cv::cvtColor(img_noise, output, cv::COLOR_RGB2GRAY);
// Binarize gray image
cv::threshold(output, output, 140, 255, cv::THRESH_BINARY);
// Create the kernel [-1 0 1]
// This kernel is based on the one found in the
// Lane Departure Warning System by Mathworks
anchor = cv::Point(-1, -1);
kernel = cv::Mat(1, 3, CV_32F);
kernel.at<float>(0, 0) = -1;
kernel.at<float>(0, 1) = 0;
kernel.at<float>(0, 2) = 1;
// Filter the binary image to obtain the edges
cv::filter2D(output, output, -1, kernel, anchor, 0, cv::BORDER_DEFAULT);
cv::imshow("output", output);
return output;
}
// MASK THE EDGE IMAGE
/**
*@brief Mask the image so that only the edges that form part of the lane are detected
*@param img_edges is the edges image from the previous function
*@return Binary image with only the desired edges being represented
*/
cv::Mat LaneDetector::mask(cv::Mat img_edges) {
cv::Mat output;
cv::Mat mask = cv::Mat::zeros(img_edges.size(), img_edges.type());
cv::Point pts[4] = {
cv::Point(210, 720),
cv::Point(550, 450),
cv::Point(717, 450),
cv::Point(1280, 720)
};
// Create a binary polygon mask
cv::fillConvexPoly(mask, pts, 4, cv::Scalar(255, 0, 0));
// Multiply the edges image and the mask to get the output
cv::bitwise_and(img_edges, mask, output);
return output;
}
// HOUGH LINES
/**
*@brief Obtain all the line segments in the masked images which are going to be part of the lane boundaries
*@param img_mask is the masked binary image from the previous function
*@return Vector that contains all the detected lines in the image
*/
std::vector<cv::Vec4i> LaneDetector::houghLines(cv::Mat img_mask) {
std::vector<cv::Vec4i> line;
// rho and theta are selected by trial and error
HoughLinesP(img_mask, line, 1, CV_PI / 180, 20, 20, 30);
return line;
}
// SORT RIGHT AND LEFT LINES
/**
*@brief Sort all the detected Hough lines by slope.
*@brief The lines are classified into right or left depending
*@brief on the sign of their slope and their approximate location
*@param lines is the vector that contains all the detected lines
*@param img_edges is used for determining the image center
*@return The output is a vector(2) that contains all the classified lines
*/
std::vector<std::vector<cv::Vec4i> > LaneDetector::lineSeparation(std::vector<cv::Vec4i> lines, cv::Mat img_edges) {
std::vector<std::vector<cv::Vec4i> > output(2);
size_t j = 0;
cv::Point ini;
cv::Point fini;
double slope_thresh = 0.3;
std::vector<double> slopes;
std::vector<cv::Vec4i> selected_lines;
std::vector<cv::Vec4i> right_lines, left_lines;
// Calculate the slope of all the detected lines
for (auto i : lines) {
ini = cv::Point(i[0], i[1]);
fini = cv::Point(i[2], i[3]);
// Basic algebra: slope = (y1 - y0)/(x1 - x0)
double slope = (static_cast<double>(fini.y) - static_cast<double>(ini.y)) / (static_cast<double>(fini.x) - static_cast<double>(ini.x) + 0.00001);
// If the slope is too horizontal, discard the line
// If not, save them and their respective slope
if (std::abs(slope) > slope_thresh) {
slopes.push_back(slope);
selected_lines.push_back(i);
}
}
// Split the lines into right and left lines
img_center = static_cast<double>((img_edges.cols / 2));
while (j < selected_lines.size()) {
ini = cv::Point(selected_lines[j][0], selected_lines[j][1]);
fini = cv::Point(selected_lines[j][2], selected_lines[j][3]);
// Condition to classify line as left side or right side
if (slopes[j] > 0 && fini.x > img_center && ini.x > img_center) {
right_lines.push_back(selected_lines[j]);
right_flag = true;
}
else if (slopes[j] < 0 && fini.x < img_center && ini.x < img_center) {
left_lines.push_back(selected_lines[j]);
left_flag = true;
}
j++;
}
output[0] = right_lines;
output[1] = left_lines;
return output;
}
// REGRESSION FOR LEFT AND RIGHT LINES
/**
*@brief Regression takes all the classified line segments' initial and final points and fits a new line out of them using the method of least squares.
*@brief This is done for both sides, left and right.
*@param left_right_lines is the output of the lineSeparation function
*@param inputImage is used to select where the lines will end
*@return output contains the initial and final points of both lane boundary lines
*/
std::vector<cv::Point> LaneDetector::regression(std::vector<std::vector<cv::Vec4i> > left_right_lines, cv::Mat inputImage) {
std::vector<cv::Point> output(4);
cv::Point ini;
cv::Point fini;
cv::Point ini2;
cv::Point fini2;
cv::Vec4d right_line;
cv::Vec4d left_line;
std::vector<cv::Point> right_pts;
std::vector<cv::Point> left_pts;
// If right lines are being detected, fit a line using all the init and final points of the lines
if (right_flag == true) {
for (auto i : left_right_lines[0]) {
ini = cv::Point(i[0], i[1]);
fini = cv::Point(i[2], i[3]);
right_pts.push_back(ini);
right_pts.push_back(fini);
}
if (right_pts.size() > 0) {
// The right line is formed here
cv::fitLine(right_pts, right_line, CV_DIST_L2, 0, 0.01, 0.01);
right_m = right_line[1] / right_line[0];
right_b = cv::Point(right_line[2], right_line[3]);
}
}
// If left lines are being detected, fit a line using all the init and final points of the lines
if (left_flag == true) {
for (auto j : left_right_lines[1]) {
ini2 = cv::Point(j[0], j[1]);
fini2 = cv::Point(j[2], j[3]);
left_pts.push_back(ini2);
left_pts.push_back(fini2);
}
if (left_pts.size() > 0) {
// The left line is formed here
cv::fitLine(left_pts, left_line, CV_DIST_L2, 0, 0.01, 0.01);
left_m = left_line[1] / left_line[0];
left_b = cv::Point(left_line[2], left_line[3]);
}
}
// Once the slope and offset points have been obtained, apply the line equation to obtain the line points
int ini_y = inputImage.rows;
int fin_y = 470;
double right_ini_x = ((ini_y - right_b.y) / right_m) + right_b.x;
double right_fin_x = ((fin_y - right_b.y) / right_m) + right_b.x;
double left_ini_x = ((ini_y - left_b.y) / left_m) + left_b.x;
double left_fin_x = ((fin_y - left_b.y) / left_m) + left_b.x;
output[0] = cv::Point(right_ini_x, ini_y);
output[1] = cv::Point(right_fin_x, fin_y);
output[2] = cv::Point(left_ini_x, ini_y);
output[3] = cv::Point(left_fin_x, fin_y);
return output;
}
// TURN PREDICTION
/**
*@brief Predict if the lane is turning left, right or if it is going straight
*@brief It is done by seeing where the vanishing point is with respect to the center of the image
*@return String that says if there is left or right turn or if the road is straight
*/
std::string LaneDetector::predictTurn() {
std::string output;
double vanish_x;
double thr_vp = 10;
// The vanishing point is the point where both lane boundary lines intersect
vanish_x = static_cast<double>(((right_m*right_b.x) - (left_m*left_b.x) - right_b.y + left_b.y) / (right_m - left_m));
// The vanishing point's location determines where the road is turning
if (vanish_x < (img_center - thr_vp))
output = "Left Turn";
else if (vanish_x >(img_center + thr_vp))
output = "Right Turn";
else if (vanish_x >= (img_center - thr_vp) && vanish_x <= (img_center + thr_vp))
output = "Straight";
return output;
}
// PLOT RESULTS
/**
*@brief This function plots both sides of the lane, the turn prediction message and a transparent polygon that covers the area inside the lane boundaries
*@param inputImage is the original captured frame
*@param lane is the vector containing the information of both lines
*@param turn is the output string containing the turn information
*@return The function returns a 0
*/
int LaneDetector::plotLane(cv::Mat inputImage, std::vector<cv::Point> lane, std::string turn) {
std::vector<cv::Point> poly_points;
cv::Mat output;
// Create the transparent polygon for a better visualization of the lane
inputImage.copyTo(output);
poly_points.push_back(lane[2]);
poly_points.push_back(lane[0]);
poly_points.push_back(lane[1]);
poly_points.push_back(lane[3]);
cv::fillConvexPoly(output, poly_points, cv::Scalar(0, 0, 255), CV_AA, 0);
cv::addWeighted(output, 0.3, inputImage, 1.0 - 0.3, 0, inputImage);
// Plot both lines of the lane boundary
cv::line(inputImage, lane[0], lane[1], cv::Scalar(0, 255, 255), 5, CV_AA);
cv::line(inputImage, lane[2], lane[3], cv::Scalar(0, 255, 255), 5, CV_AA);
// Plot the turn message
cv::putText(inputImage, turn, cv::Point(50, 90), cv::FONT_HERSHEY_COMPLEX_SMALL, 3, cvScalar(0, 255, 0), 1, CV_AA);
// Show the final output image
cv::namedWindow("Lane", CV_WINDOW_AUTOSIZE);
cv::imshow("Lane", inputImage);
return 0;
}
If you are working with a video, use this cpp instead:
#include "stdafx.h"
#include <iostream>
#include <opencv2/opencv.hpp>
#include <string>
#include <vector>
#include "LaneDetector.h"
//#include "LaneDetector.cpp"
/**
*@brief Function main that runs the main algorithm of the lane detection.
*@brief It will read a video of a car in the highway and it will output the
*@brief same video but with the plotted detected lane
*@brief The demo video "project_video.mp4" is read from the working directory
*@return flag_plot tells if the demo has successfully finished
*/
int main() {
// The input argument is the location of the video
cv::VideoCapture cap("project_video.mp4");
if (!cap.isOpened())
return -1;
LaneDetector lanedetector; // Create the class object
cv::Mat frame;
cv::Mat img_denoise;
cv::Mat img_edges;
cv::Mat img_mask;
cv::Mat img_lines;
std::vector<cv::Vec4i> lines;
std::vector<std::vector<cv::Vec4i> > left_right_lines;
std::vector<cv::Point> lane;
std::string turn;
int flag_plot = -1;
int i = 0;
// Main algorithm starts. Iterate through every frame of the video
while (i < 540) {
// Capture frame
if (!cap.read(frame))
break;
// Denoise the image using a Gaussian filter
img_denoise = lanedetector.deNoise(frame);
// Detect edges in the image
img_edges = lanedetector.edgeDetector(img_denoise);
// Mask the image so that we only get the ROI
img_mask = lanedetector.mask(img_edges);
// Obtain Hough lines in the cropped image
lines = lanedetector.houghLines(img_mask);
if (!lines.empty())
{
// Separate lines into left and right lines
left_right_lines = lanedetector.lineSeparation(lines, img_edges);
// Apply regression to obtain only one line for each side of the lane
lane = lanedetector.regression(left_right_lines, frame);
// Predict the turn by determining the vanishing point of the lines
turn = lanedetector.predictTurn();
// Plot lane detection
flag_plot = lanedetector.plotLane(frame, lane, turn);
i += 1;
cv::waitKey(25);
}
else {
flag_plot = -1;
}
}
return flag_plot;
}
For the DLL that Unity calls, use this file; if what you need is a console program instead, just replace the lanedet function with main.
To build the DLL you also need to add the following line at the end of the first file (the header), otherwise it won't work:
extern "C" __declspec(dllexport) int lanedet();
#include "stdafx.h"
#include <iostream>
#include <opencv2/opencv.hpp>
#include <string>
#include <vector>
#include "LaneDetector.h"
//using namespace cv;
//using namespace std;
LaneDetector lanedetector; // Create the class object
cv::Mat frame;
cv::Mat img_denoise;
cv::Mat img_edges;
cv::Mat img_mask;
cv::Mat img_lines;
std::vector<cv::Vec4i> lines;
std::vector<std::vector<cv::Vec4i> > left_right_lines;
std::vector<cv::Point> lane;
std::string turn;
int flag_plot = -1;
int i = 0;
int lanedet(){
//int lanedet(BYTE * src)
frame = cv::imread("D:\\hehe.png"); //cv::imread("C:\\Users\\Administrator\\Desktop\\image\\haha.png");
//imshow("asdf",frame);
//cv::waitKey();
img_denoise = lanedetector.deNoise(frame);
// Detect edges in the image
img_edges = lanedetector.edgeDetector(img_denoise);
// Mask the image so that we only get the ROI
img_mask = lanedetector.mask(img_edges);
// Obtain Hough lines in the cropped image
lines = lanedetector.houghLines(img_mask);
if (!lines.empty())
{
// Separate lines into left and right lines
left_right_lines = lanedetector.lineSeparation(lines, img_edges);
// Apply regression to obtain only one line for each side of the lane
lane = lanedetector.regression(left_right_lines, frame);
// Predict the turn by determining the vanishing point of the lines
turn = lanedetector.predictTurn();
// Plot lane detection
flag_plot = lanedetector.plotLane(frame, lane, turn);
//i += 1;
cv::waitKey(20);
}
else {
//flag_plot = -1;
return 0;
}
getchar();
return 1;
}
Unity's method for generating the image and calling the DLL:
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using System.IO;
using System.Runtime.InteropServices;
using System.Threading;
public class LaneDetector : MonoBehaviour {
[DllImport("LaneIdentificationImg")]
extern static int lanedet();
[SerializeField]
Camera sensorcamera;
RenderTexture rt;
void Start () {
InvokeRepeating("imginvoke",5,1f);
}
void Update () {
rt = sensorcamera.targetTexture;
}
void imginvoke()
{
OutputRt(sensorcamera.targetTexture, 0);
lanedet();
}
public void OutputRt(RenderTexture rt, int idx = 0)
{
rt = sensorcamera.targetTexture;
RenderTexture.active = rt;
Texture2D png = new Texture2D(rt.width, rt.height, TextureFormat.RGB24, false);
png.ReadPixels(new Rect(0, 0, rt.width, rt.height), 0, 0);
byte[] dataBytes = png.EncodeToPNG();
//int[] data = new int[dataBytes.Length];
//for (int i = 0; i < dataBytes.Length; i++)
//{
// data[i] = (int)dataBytes[i];
//}
//if (lanedet(dataBytes) == 0)
//{
// return;
//}
string strSaveFile = "D:\\hehe" + ".png";
FileStream fs = File.Open(strSaveFile, FileMode.OpenOrCreate);
fs.Write(dataBytes, 0, dataBytes.Length);
fs.Flush();
fs.Close();
png = null;
RenderTexture.active = null;
}
}