Objects have a number of global characteristics, such as color and shape, that describe the object as a whole. These features can be used to detect an object and track it across a sequence of frames. In this section we use color as the feature for detecting an object of a particular color. This approach is useful when the object to be detected has a specific color that differs from the background color; if the object and the background share the same color, the detection fails. In this section we will try to detect any blue object in a webcam stream using OpenCV and CUDA.
#include <iostream>
#include "opencv2/opencv.hpp"
#include "opencv2/cudaimgproc.hpp"
#include "opencv2/cudaarithm.hpp"
int main(int argc, char** argv)
{
cv::VideoCapture cap(0); //capture the video from web cam
// if webcam is not available then exit the program
if (!cap.isOpened())
{
std::cout << "Cannot open the web cam" << std::endl;
return -1;
}
while (true)
{
cv::Mat frame;
// read a new frame from webcam
bool flag = cap.read(frame);
if (!flag)
{
std::cout << "Cannot read a frame from webcam" << std::endl;
break;
}
cv::cuda::GpuMat d_frame, d_frame_hsv, d_intermediate, d_result;
cv::cuda::GpuMat d_frame_shsv[3];
cv::cuda::GpuMat d_thresc[3];
cv::Mat h_result;
d_frame.upload(frame);
//Transform image to HSV
cv::cuda::cvtColor(d_frame, d_frame_hsv, cv::COLOR_BGR2HSV);
//Split HSV 3 channels
cv::cuda::split(d_frame_hsv, d_frame_shsv);
//Threshold the HSV channels: keep pixels with hue above 110 (blue region), saturation above 50 and value above 50
cv::cuda::threshold(d_frame_shsv[0], d_thresc[0], 110, 130, cv::THRESH_BINARY);
cv::cuda::threshold(d_frame_shsv[1], d_thresc[1], 50, 255, cv::THRESH_BINARY);
cv::cuda::threshold(d_frame_shsv[2], d_thresc[2], 50, 255, cv::THRESH_BINARY);
//Bitwise AND the channels
cv::cuda::bitwise_and(d_thresc[0], d_thresc[1], d_intermediate);
cv::cuda::bitwise_and(d_intermediate, d_thresc[2], d_result);
d_result.download(h_result);
cv::imshow("Thresholded Image", h_result);
cv::imshow("Original", frame);
if (cv::waitKey(1) == 'q')
{
break;
}
}
return 0;
}
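The per-channel thresholds combined with bitwise AND above build a mask of blue-ish pixels. For comparison, the following is a minimal CPU-only sketch of the same idea using cv::inRange with an explicit hue range of [110, 130]; it is not part of the original listing, and the file name images/blue_object.jpg is only a placeholder:
#include <iostream>
#include "opencv2/opencv.hpp"

int main()
{
    // Placeholder test image; any BGR image containing a blue object will do.
    cv::Mat frame = cv::imread("images/blue_object.jpg");
    if (frame.empty())
    {
        std::cout << "can not open image" << std::endl;
        return -1;
    }
    cv::Mat hsv, mask;
    cv::cvtColor(frame, hsv, cv::COLOR_BGR2HSV);
    // Blue hues fall roughly in [110, 130] of OpenCV's 0-179 hue range;
    // the saturation and value lower bounds reject washed-out and dark pixels.
    cv::inRange(hsv, cv::Scalar(110, 50, 50), cv::Scalar(130, 255, 255), mask);
    cv::imshow("Thresholded Image", mask);
    cv::waitKey(0);
    return 0;
}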
#include <iostream>
#include "opencv2/opencv.hpp"
#include "opencv2/cudaimgproc.hpp"
int main()
{
cv::Mat h_image = cv::imread("images/drawing.JPG", 0);
if (h_image.empty())
{
std::cout << "can not open image"<< std::endl;
return -1;
}
cv::cuda::GpuMat d_edge,d_image;
cv::Mat h_edge;
d_image.upload(h_image);
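//Create the CUDA Canny detector: low threshold 2.0, high threshold 100.0, aperture size 3, L1 gradient norm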
cv::Ptr<cv::cuda::CannyEdgeDetector> canny_edge = cv::cuda::createCannyEdgeDetector(2.0, 100.0, 3, false);
canny_edge->detect(d_image, d_edge);
d_edge.download(h_edge);
cv::imshow("source", h_image);
cv::imshow("detected edges", h_edge);
cv::waitKey(0);
return 0;
}
#include <iostream>
#include "opencv2/opencv.hpp"
#include "opencv2/cudaimgproc.hpp"
int main()
{
cv::Mat h_image = cv::imread("images/drawing.JPG",0);
if (h_image.empty())
{
std::cout << "can not open image"<< std::endl;
return -1;
}
cv::Mat h_edge;
cv::Canny(h_image, h_edge, 100, 200, 3);
cv::Mat h_imagec;
cv::cvtColor(h_edge, h_imagec, cv::COLOR_GRAY2BGR);
cv::Mat h_imageg = h_imagec.clone();
std::vector<cv::Vec4i> h_lines;
{
const int64 start = cv::getTickCount();
cv::HoughLinesP(h_edge, h_lines, 1, CV_PI / 180, 50, 60, 5);
const double time_elapsed = (cv::getTickCount() - start) / cv::getTickFrequency();
std::cout << "CPU Time : " << time_elapsed * 1000 << " ms" << std::endl;
std::cout << "CPU FPS : " << (1/time_elapsed) << std::endl;
}
for (size_t i = 0; i < h_lines.size(); ++i)
{
cv::Vec4i line_point = h_lines[i];
cv::line(h_imagec, cv::Point(line_point[0], line_point[1]), cv::Point(line_point[2], line_point[3]), cv::Scalar(0, 0, 255), 2, cv::LINE_AA);
}
cv::cuda::GpuMat d_edge, d_lines;
d_edge.upload(h_edge);
{
const int64 start = cv::getTickCount();
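//GPU Hough segment detector: rho = 1 pixel, theta = 1 degree, minimum line length 50, maximum allowed gap 5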
cv::Ptr<cv::cuda::HoughSegmentDetector> hough = cv::cuda::createHoughSegmentDetector(1.0f, (float) (CV_PI / 180.0f), 50, 5);
hough->detect(d_edge, d_lines);
const double time_elapsed = (cv::getTickCount() - start) / cv::getTickFrequency();
std::cout << "GPU Time : " << time_elapsed * 1000 << " ms" << std::endl;
std::cout << "GPU FPS : " << (1/time_elapsed) << std::endl;
}
std::vector<cv::Vec4i> lines_g;
if (!d_lines.empty())
{
lines_g.resize(d_lines.cols);
cv::Mat h_lines(1, d_lines.cols, CV_32SC4, &lines_g[0]);
d_lines.download(h_lines);
}
for (size_t i = 0; i < lines_g.size(); ++i)
{
cv::Vec4i line_point = lines_g[i];
cv::line(h_imageg, cv::Point(line_point[0], line_point[1]), cv::Point(line_point[2], line_point[3]), cv::Scalar(0, 0, 255), 2, cv::LINE_AA);
}
cv::imshow("source", h_image);
cv::imshow("detected lines [CPU]", h_imagec);
cv::imshow("detected lines [GPU]", h_imageg);
cv::imwrite("hough_source.png", h_image);
cv::imwrite("hough_cpu_line.png", h_imagec);
cv::imwrite("hough_gpu_line.png", h_imageg);
cv::waitKey(0);
return 0;
}
#include <iostream>
#include "opencv2/opencv.hpp"
#include "opencv2/cudaimgproc.hpp"
int main(int argc, char** argv)
{
cv::Mat h_image = cv::imread("images/eight.tif", 1);
cv::Mat h_gray;
cv::cvtColor(h_image, h_gray, cv::COLOR_BGR2GRAY);
cv::cuda::GpuMat d_gray, d_result;
std::vector<cv::Vec3f> d_Circles;
cv::medianBlur(h_gray, h_gray, 5);
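//Hough circles detector: accumulator resolution dp = 1, minimum distance between centres 100, Canny high threshold 122, accumulator threshold 50, radii from 1 up to the larger image dimension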
cv::Ptr<cv::cuda::HoughCirclesDetector> detector = cv::cuda::createHoughCirclesDetector(1, 100, 122, 50, 1, std::max(h_image.size().width, h_image.size().height));
d_gray.upload(h_gray);
detector->detect(d_gray, d_result);
d_Circles.resize(d_result.size().width);
if (!d_Circles.empty())
d_result.row(0).download(cv::Mat(d_Circles).reshape(3, 1));
std::cout << "No of circles: " << d_Circles.size() << std::endl;
for (size_t i = 0; i < d_Circles.size(); i++)
{
cv::Vec3i cir = d_Circles[i];
cv::circle(h_image, cv::Point(cir[0], cir[1]), cir[2], cv::Scalar(255, 0, 0), 2, cv::LINE_AA);
}
cv::imshow("detected circles", h_image);
cv::waitKey(0);
return 0;
}
#include <iostream>
#include "opencv2/opencv.hpp"
#include "opencv2/cudafeatures2d.hpp"
int main()
{
cv::Mat h_image = cv::imread("images/drawing.JPG", 0);
//Detect the keypoints using FAST Detector
cv::Ptr<cv::cuda::FastFeatureDetector> detector = cv::cuda::FastFeatureDetector::create(100, true, 2); //threshold 100, non-max suppression enabled, type 2 = TYPE_9_16
std::vector<cv::KeyPoint> keypoints;
cv::cuda::GpuMat d_image;
d_image.upload(h_image);
detector->detect(d_image, keypoints);
cv::drawKeypoints(h_image, keypoints, h_image);
//Show detected keypoints
cv::imshow("Final Result", h_image);
cv::waitKey(0);
return 0;
}
#include <iostream>
#include "opencv2/opencv.hpp"
#include "opencv2/cudafeatures2d.hpp"
int main()
{
cv::Mat h_image = cv::imread("images/drawing.JPG", 0);
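//ORB keypoint detector on the GPU with default parameters (500 features)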
cv::Ptr<cv::cuda::ORB> detector = cv::cuda::ORB::create();
std::vector<cv::KeyPoint> keypoints;
cv::cuda::GpuMat d_image;
d_image.upload(h_image);
detector->detect(d_image, keypoints);
cv::drawKeypoints(h_image, keypoints, h_image);
cv::imshow("Final Result", h_image);
cv::waitKey(0);
return 0;
}
#include <iostream>
#include "opencv2/opencv.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/cudafeatures2d.hpp"
#include "opencv2/xfeatures2d/cuda.hpp"
int main(int argc, char** argv)
{
cv::Mat h_object_image = cv::imread("images/object1.jpg", 0);
cv::Mat h_scene_image = cv::imread("images/scene1.jpg", 0);
cv::cuda::GpuMat d_object_image;
cv::cuda::GpuMat d_scene_image;
cv::cuda::GpuMat d_keypoints_scene, d_keypoints_object;
std::vector<cv::KeyPoint> h_keypoints_scene, h_keypoints_object;
cv::cuda::GpuMat d_descriptors_scene, d_descriptors_object;
d_object_image.upload(h_object_image);
d_scene_image.upload(h_scene_image);
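//GPU SURF with a Hessian threshold of 100; keypoints and descriptors are computed directly on the GPU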
cv::cuda::SURF_CUDA surf(100);
surf(d_object_image, cv::cuda::GpuMat(), d_keypoints_object, d_descriptors_object);
surf(d_scene_image, cv::cuda::GpuMat(), d_keypoints_scene, d_descriptors_scene);
cv::Ptr<cv::cuda::DescriptorMatcher> matcher = cv::cuda::DescriptorMatcher::createBFMatcher();
std::vector<std::vector<cv::DMatch>> d_matches;
matcher->knnMatch(d_descriptors_object, d_descriptors_scene, d_matches, 2);
surf.downloadKeypoints(d_keypoints_scene, h_keypoints_scene);
surf.downloadKeypoints(d_keypoints_object, h_keypoints_object);
std::vector<cv::DMatch> good_matches;
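//Lowe's ratio test: keep a match only when the best distance is below 0.6 times the second-best distance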
for (int k = 0; k < std::min(h_keypoints_object.size() - 1, d_matches.size()); k++)
{
if ((d_matches[k][0].distance < 0.6 * (d_matches[k][1].distance)) && ((int)d_matches[k].size() <= 2 && (int)d_matches[k].size() > 0))
{
good_matches.push_back(d_matches[k][0]);
}
}
std::cout << "size:" << good_matches.size();
cv::Mat h_image_result;
cv::drawMatches(h_object_image, h_keypoints_object, h_scene_image, h_keypoints_scene,
good_matches, h_image_result, cv::Scalar::all(-1), cv::Scalar::all(-1),
std::vector<char>(), cv::DrawMatchesFlags::DEFAULT);
std::vector<cv::Point2f> object;
std::vector<cv::Point2f> scene;
for (int i = 0; i < good_matches.size(); i++)
{
object.push_back(h_keypoints_object[good_matches[i].queryIdx].pt);
scene.push_back(h_keypoints_scene[good_matches[i].trainIdx].pt);
}
cv::Mat Homo = cv::findHomography(object, scene, cv::RANSAC);
std::vector<cv::Point2f> corners(4);
std::vector<cv::Point2f> scene_corners(4);
corners[0] = cv::Point(0, 0);
corners[1] = cv::Point(h_object_image.cols, 0);
corners[2] = cv::Point(h_object_image.cols, h_object_image.rows);
corners[3] = cv::Point(0, h_object_image.rows);
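//Project the object corners into the scene with the homography and draw the bounding box; the x offset of h_object_image.cols is needed because drawMatches places the object image to the left of the scene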
cv::perspectiveTransform(corners, scene_corners, Homo);
cv::line(h_image_result, scene_corners[0] + cv::Point2f(h_object_image.cols, 0), scene_corners[1] + cv::Point2f(h_object_image.cols, 0), cv::Scalar(255, 0, 0), 4);
cv::line(h_image_result, scene_corners[1] + cv::Point2f(h_object_image.cols, 0), scene_corners[2] + cv::Point2f(h_object_image.cols, 0), cv::Scalar(255, 0, 0), 4);
cv::line(h_image_result, scene_corners[2] + cv::Point2f(h_object_image.cols, 0), scene_corners[3] + cv::Point2f(h_object_image.cols, 0), cv::Scalar(255, 0, 0), 4);
cv::line(h_image_result, scene_corners[3] + cv::Point2f(h_object_image.cols, 0), scene_corners[0] + cv::Point2f(h_object_image.cols, 0), cv::Scalar(255, 0, 0), 4);
cv::imshow("Good Matches & Object detection", h_image_result);
cv::waitKey(0);
return 0;
}
#include <iostream>
#include "opencv2/opencv.hpp"
#include "opencv2/cudaobjdetect.hpp"
int main()
{
cv::Mat h_image = cv::imread("Chapter7//lena_color_512.tif", 0);
cv::Ptr<cv::cuda::CascadeClassifier> cascade = cv::cuda::CascadeClassifier::create("Chapter7//haarcascade_frontalface_alt2.xml");
cv::cuda::GpuMat d_image;
cv::cuda::GpuMat d_buf;
d_image.upload(h_image);
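//Run the cascade on the GPU; detectMultiScale packs the detections into a GpuMat buffer, which convert() turns into a vector of cv::Rect on the host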
cascade->detectMultiScale(d_image, d_buf);
std::vector<cv::Rect> detections;
cascade->convert(d_buf, detections);
if (detections.empty())
std::cout << "No detection." << std::endl;
cv::cvtColor(h_image, h_image, cv::COLOR_GRAY2BGR);
for (int i = 0; i < detections.size(); ++i)
{
cv::rectangle(h_image, detections[i], cv::Scalar(0, 255, 255), 5);
}
cv::imshow("Result image", h_image);
cv::waitKey(0);
return 0;
}
#include <iostream>
#include "opencv2/opencv.hpp"
#include "opencv2/cudaobjdetect.hpp"
#include "opencv2/cudaimgproc.hpp"
int main()
{
cv::Mat h_image = cv::imread("images/lena_color_512.tif", 0);
cv::Ptr<cv::cuda::CascadeClassifier> cascade = cv::cuda::CascadeClassifier::create("Chapter7//haarcascade_eye.xml");
cv::cuda::GpuMat d_image;
cv::cuda::GpuMat d_buf;
d_image.upload(h_image);
cascade->detectMultiScale(d_image, d_buf);
std::vector<cv::Rect> detections;
cascade->convert(d_buf, detections);
if (detections.empty())
std::cout << "No detection." << std::endl;
cv::cvtColor(h_image, h_image, cv::COLOR_GRAY2BGR);
for (int i = 0; i < detections.size(); ++i)
{
cv::rectangle(h_image, detections[i], cv::Scalar(0, 255, 255), 5);
}
cv::imshow("Result image", h_image);
cv::waitKey(0);
return 0;
}