#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
// Read the image
Mat img = imread("images/cameraman.tif",0);
// Check for failure in reading an Image
if (img.empty())
{
cout << "Could not open an image" << endl;
return -1;
}
// Name of the display window
String win_name = "My First Opencv Program";
// Create a window
namedWindow(win_name);
// Show our image inside the created window.
imshow(win_name, img);
// Wait for any keystroke in the window
waitKey(0);
//destroy the created window
destroyWindow(win_name);
return 0;
}
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
// Create a single-channel 256x256 matrix filled with zeros (a black image)
Mat img(256, 256, CV_8UC1, Scalar(0));
String win_name = "Blank Image";
namedWindow(win_name);
imshow(win_name, img);
waitKey(0);
destroyWindow(win_name);
return 0;
}
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
// Channel order is BGR, so Scalar(255,0,0) is blue
Mat img(256, 256, CV_8UC3, Scalar(255,0,0));
String win_name = "Blank Blue Color Image";
namedWindow(win_name);
imshow(win_name, img);
waitKey(0);
destroyWindow(win_name);
return 0;
}
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
// 3-channel 512x512 black image
Mat img(512, 512, CV_8UC3, Scalar(0,0,0));
// Draw a line: start point, end point, color, thickness
line(img,Point(0,0),Point(511,511),Scalar(0,255,0),7);
// Draw a rectangle
rectangle(img,Point(384,0),Point(510,128),Scalar(255,255,0),5);
// Draw a filled circle (thickness -1)
circle(img,Point(447,63), 63, Scalar(0,0,255), -1);
// Draw a filled half-ellipse
ellipse(img,Point(256,256),Point(100,100),0,0,180,255,-1);
// Draw text
putText( img, "OpenCV!", Point(10,500), FONT_HERSHEY_SIMPLEX, 3,
Scalar(255, 255, 255), 5, 8 );
String win_name = "Blank Blue Color Image"; //Name of the window
namedWindow(win_name); // Create a window
imshow(win_name, img); // Show our image inside the created window.
waitKey(0); // Wait for any keystroke in the window
destroyWindow(win_name); //destroy the created window
return 0;
}
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main(int argc, char* argv[])
{
// Open the video file
VideoCapture cap("images/rhinos.avi");
// if not success, exit program
if (cap.isOpened() == false)
{
cout << "Cannot open the video file" << endl;
return -1;
}
cout<<"Press Q to Quit" << endl;
String win_name = "First Video";
namedWindow(win_name);
while (true)
{
Mat frame;
// read a frame
bool flag = cap.read(frame);
//Breaking the while loop at the end of the video
if (flag == false)
{
break;
}
//display the frame
imshow(win_name, frame);
//Wait for 100 ms and key 'q' for exit
if (waitKey(100) == 'q')
{
break;
}
}
destroyWindow(win_name);
return 0;
}
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main(int argc, char* argv[])
{
//open the Webcam
VideoCapture cap(0); // open camera index 0
// if not success, exit program
if (cap.isOpened() == false)
{
cout << "Cannot open Webcam" << endl;
return -1;
}
//get the frames rate of the video
double fps = cap.get(CAP_PROP_FPS);
cout << "Frames per seconds : " << fps << endl;
cout<<"Press Q to Quit" <<endl;
String win_name = "Webcam Video";
namedWindow(win_name); //create a window
while (true)
{
Mat frame;
bool flag = cap.read(frame); // read a new frame from video
if (flag == false) break; // stop if no frame could be read
//show the frame in the created window
imshow(win_name, frame);
if (waitKey(1) == 'q')
{
break;
}
}
return 0;
}
#include <iostream>
#include "opencv2/opencv.hpp"
int main (int argc, char* argv[])
{
// Read the images into host (CPU) memory
cv::Mat h_img1 = cv::imread("images/cameraman.tif");
cv::Mat h_img2 = cv::imread("images/circles.png");
cv::Mat h_result1;
// Define device (GPU) matrices
cv::cuda::GpuMat d_result1,d_img1, d_img2;
// Upload the host images to the GPU
d_img1.upload(h_img1);
d_img2.upload(h_img2);
// Add the two images on the GPU
cv::cuda::add(d_img1,d_img2, d_result1);
// Download the result from the GPU to the CPU
d_result1.download(h_result1);
// Display the results
cv::imshow("Image1 ", h_img1);
cv::imshow("Image2 ", h_img2);
cv::imshow("Result addition ", h_result1);
cv::imwrite("images/result_add.png", h_result1);
cv::waitKey();
return 0;
}
#include <iostream>
#include "opencv2/opencv.hpp"
int main (int argc, char* argv[])
{
//Read Two Images
cv::Mat h_img1 = cv::imread("images/cameraman.tif");
cv::Mat h_img2 = cv::imread("images/circles.png");
cv::Mat h_result1;
// Define device (GPU) matrices
cv::cuda::GpuMat d_result1,d_img1, d_img2;
// Upload the host images to the GPU
d_img1.upload(h_img1);
d_img2.upload(h_img2);
// Subtract the two images on the GPU
cv::cuda::subtract(d_img1, d_img2,d_result1);
// Download the result back to the CPU
d_result1.download(h_result1);
// Display and save the result
cv::imshow("Image1 ", h_img1);
cv::imshow("Image2 ", h_img2);
cv::imshow("Result Subtraction ", h_result1);
cv::imwrite("images/result_add.png", h_result1);
cv::waitKey();
return 0;
}
#include <iostream>
#include "opencv2/opencv.hpp"
int main (int argc, char* argv[])
{
//Read Two Images
cv::Mat h_img1 = cv::imread("images/cameraman.tif");
cv::Mat h_img2 = cv::imread("images/circles.png");
cv::Mat h_result1;
// Define device (GPU) matrices
cv::cuda::GpuMat d_result1,d_img1, d_img2;
// Upload the input images
d_img1.upload(h_img1);
d_img2.upload(h_img2);
// Blend the images: d_result1 = 0.7*d_img1 + 0.3*d_img2 + 0.0
cv::cuda::addWeighted(d_img1,0.7,d_img2,0.3,0,d_result1);
// Download the result
d_result1.download(h_result1);
cv::imshow("Image1 ", h_img1);
cv::imshow("Image2 ", h_img2);
cv::imshow("Result blending ", h_result1);
cv::imwrite("images/result_add.png", h_result1);
cv::waitKey();
return 0;
}
// bitwise_and performs a bitwise AND on every pixel of an image (grayscale or color): 1&1=1, 1&0=0, 0&1=0, 0&0=0
// bitwise_or performs a bitwise OR on every pixel: 1|1=1, 1|0=1, 0|1=1, 0|0=0
// bitwise_xor performs a bitwise XOR on every pixel: 1^1=0, 1^0=1, 0^1=1, 0^0=0
// bitwise_not performs a bitwise NOT on every pixel: ~1=0, ~0=1
#include <iostream>
#include "opencv2/opencv.hpp"
int main (int argc, char* argv[])
{
cv::Mat h_img1 = cv::imread("images/circles.png");
//Create Device variables
cv::cuda::GpuMat d_result1,d_img1;
cv::Mat h_result1;
//Upload Image to device
d_img1.upload(h_img1);
cv::cuda::bitwise_not(d_img1,d_result1); // invert the image: d_result1 = ~d_img1
//Download result back to host
d_result1.download(h_result1);
cv::imshow("Result inversion ", h_result1);
cv::imwrite("images/result_inversion.png", h_result1);
cv::waitKey();
return 0;
}
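// The comment block above describes all four bitwise operations, but the program only demonstrates bitwise_not.
// The following sketch of bitwise_and, bitwise_or and bitwise_xor is an added illustration (not part of the original
// listing); it assumes the same pair of equally sized images that were added together earlier.
#include <iostream>
#include "opencv2/opencv.hpp"
int main ()
{
cv::Mat h_img1 = cv::imread("images/circles.png");
cv::Mat h_img2 = cv::imread("images/cameraman.tif");
//Create device variables and upload the inputs
cv::cuda::GpuMat d_img1, d_img2, d_result_and, d_result_or, d_result_xor;
d_img1.upload(h_img1);
d_img2.upload(h_img2);
// Pixel-wise AND, OR and XOR of the two images on the GPU
cv::cuda::bitwise_and(d_img1, d_img2, d_result_and);
cv::cuda::bitwise_or(d_img1, d_img2, d_result_or);
cv::cuda::bitwise_xor(d_img1, d_img2, d_result_xor);
//Download results back to host
cv::Mat h_result_and, h_result_or, h_result_xor;
d_result_and.download(h_result_and);
d_result_or.download(h_result_or);
d_result_xor.download(h_result_xor);
cv::imshow("Result AND ", h_result_and);
cv::imshow("Result OR ", h_result_or);
cv::imshow("Result XOR ", h_result_xor);
cv::waitKey();
return 0;
}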
#include <iostream>
#include "opencv2/opencv.hpp"
int main (int argc, char* argv[])
{
cv::Mat h_img1 = cv::imread("images/autumn.tif");
//Define device variables
cv::cuda::GpuMat d_result1,d_result2,d_result3,d_result4,d_img1;
//Upload Image to device
d_img1.upload(h_img1);
// Color space conversions on the GPU
cv::cuda::cvtColor(d_img1, d_result1,cv::COLOR_BGR2GRAY); // BGR to grayscale
cv::cuda::cvtColor(d_img1, d_result2,cv::COLOR_BGR2RGB);  // BGR to RGB
cv::cuda::cvtColor(d_img1, d_result3,cv::COLOR_BGR2HSV);  // BGR to HSV
cv::cuda::cvtColor(d_img1, d_result4,cv::COLOR_BGR2YCrCb);// BGR to YCrCb
cv::Mat h_result1,h_result2,h_result3,h_result4;
// Download the results to the CPU
d_result1.download(h_result1);
d_result2.download(h_result2);
d_result3.download(h_result3);
d_result4.download(h_result4);
// Display the results
cv::imshow("Result in Gray ", h_result1);
cv::imshow("Result in RGB", h_result2);
cv::imshow("Result in HSV ", h_result3);
cv::imshow("Result in YCrCb ", h_result4);
cv::waitKey();
return 0;
}
#include <iostream>
#include "opencv2/opencv.hpp"
int main (int argc, char* argv[])
{
cv::Mat h_img1 = cv::imread("images/cameraman.tif", 0);
//Define device variables
cv::cuda::GpuMat d_result1,d_result2,d_result3,d_result4,d_result5, d_img1;
//Upload image on device
d_img1.upload(h_img1);
// Thresholding on the GPU: threshold = 128, max value = 255
cv::cuda::threshold(d_img1, d_result1, 128.0, 255.0, cv::THRESH_BINARY);     // binary: above threshold -> max value
cv::cuda::threshold(d_img1, d_result2, 128.0, 255.0, cv::THRESH_BINARY_INV); // inverse binary: above threshold -> 0
cv::cuda::threshold(d_img1, d_result3, 128.0, 255.0, cv::THRESH_TRUNC);      // truncate: above threshold -> clipped to threshold
cv::cuda::threshold(d_img1, d_result4, 128.0, 255.0, cv::THRESH_TOZERO);     // to zero: below threshold -> 0
cv::cuda::threshold(d_img1, d_result5, 128.0, 255.0, cv::THRESH_TOZERO_INV); // to zero inverse: above threshold -> 0
cv::Mat h_result1,h_result2,h_result3,h_result4,h_result5;
//Copy results back to host
d_result1.download(h_result1);
d_result2.download(h_result2);
d_result3.download(h_result3);
d_result4.download(h_result4);
d_result5.download(h_result5);
cv::imshow("Result Threshhold binary ", h_result1);
cv::imshow("Result Threshhold binary inverse ", h_result2);
cv::imshow("Result Threshhold truncated ", h_result3);
cv::imshow("Result Threshhold truncated to zero ", h_result4);
cv::imshow("Result Threshhold truncated to zero inverse ", h_result5);
cv::waitKey();
return 0;
}
#include <iostream>
#include "opencv2/opencv.hpp"
int main (int argc, char* argv[])
{
cv::Mat src = cv::imread("images/cameraman.tif", 0);
cv::Mat result_host1,result_host2,result_host3,result_host4,result_host5;
//Get the starting tick count
int64 work_begin = cv::getTickCount(); // start timer
cv::threshold(src, result_host1, 128.0, 255.0, cv::THRESH_BINARY);
cv::threshold(src, result_host2, 128.0, 255.0, cv::THRESH_BINARY_INV);
cv::threshold(src, result_host3, 128.0, 255.0, cv::THRESH_TRUNC);
cv::threshold(src, result_host4, 128.0, 255.0, cv::THRESH_TOZERO);
cv::threshold(src, result_host5, 128.0, 255.0, cv::THRESH_TOZERO_INV);
//Get time after work has finished
int64 delta = cv::getTickCount() - work_begin;
//Frequency of timer
double freq = cv::getTickFrequency();
double work_fps = freq / delta;
std::cout<<"Performance of Thresholding on CPU: " <<std::endl;
std::cout <<"Time: " << (1/work_fps) <<std::endl;
std::cout <<"FPS: " <<work_fps <<std::endl;
return 0;
}
#include <iostream>
#include "opencv2/opencv.hpp"
int main (int argc, char* argv[])
{
cv::Mat h_img1 = cv::imread("images/cameraman.tif", 0);
cv::cuda::GpuMat d_result1,d_result2,d_result3,d_result4,d_result5, d_img1;
//Measure initial time ticks
int64 work_begin = cv::getTickCount(); // start timer
d_img1.upload(h_img1);
cv::cuda::threshold(d_img1, d_result1, 128.0, 255.0, cv::THRESH_BINARY);
cv::cuda::threshold(d_img1, d_result2, 128.0, 255.0, cv::THRESH_BINARY_INV);
cv::cuda::threshold(d_img1, d_result3, 128.0, 255.0, cv::THRESH_TRUNC);
cv::cuda::threshold(d_img1, d_result4, 128.0, 255.0, cv::THRESH_TOZERO);
cv::cuda::threshold(d_img1, d_result5, 128.0, 255.0, cv::THRESH_TOZERO_INV);
cv::Mat h_result1,h_result2,h_result3,h_result4,h_result5;
d_result1.download(h_result1);
d_result2.download(h_result2);
d_result3.download(h_result3);
d_result4.download(h_result4);
d_result5.download(h_result5);
//Measure difference in time ticks
int64 delta = cv::getTickCount() - work_begin;
double freq = cv::getTickFrequency();
//Measure frames per second
double work_fps = freq / delta;
std::cout <<"Performance of Thresholding on GPU: " <<std::endl;
std::cout <<"Time: " << (1/work_fps) <<std::endl;
std::cout <<"FPS: " <<work_fps <<std::endl;
return 0;
}
#include <iostream>
#include "opencv2/opencv.hpp"
int main ()
{
cv::Mat h_img1 = cv::imread("images/cameraman.tif",0);
cv::cuda::GpuMat d_img1,d_result1;
d_img1.upload(h_img1);
// Histogram equalization on the GPU
cv::cuda::equalizeHist(d_img1, d_result1);
cv::Mat h_result1;
d_result1.download(h_result1);
cv::imshow("Original Image ", h_img1);
cv::imshow("Histogram Equalized Image", h_result1);
cv::imwrite("images/result_inversion.png", h_img1);
cv::imwrite("images/result_inversion.png", h_result1);
cv::waitKey();
return 0;
}
#include <iostream>
#include "opencv2/opencv.hpp"
int main ()
{
cv::Mat h_img1 = cv::imread("images/autumn.tif");
cv::Mat h_img2,h_result1;
cvtColor(h_img1, h_img2, cv::COLOR_BGR2HSV); // convert to the HSV color space
// Split into separate single-channel images
std::vector< cv::Mat > vec_channels;
cv::split(h_img2, vec_channels);
// Equalize the histogram of the value (V) channel only
cv::equalizeHist(vec_channels[2], vec_channels[2]);
// Merge the channels back
cv::merge(vec_channels, h_img2);
//Convert the histogram equalized image from HSV to BGR color space again
cv::cvtColor(h_img2,h_result1, cv::COLOR_HSV2BGR);
cv::imshow("Original Image ", h_img1);
cv::imshow("Histogram Equalized Image", h_result1);
cv::waitKey();
return 0;
}
#include <iostream>
#include "opencv2/opencv.hpp"
int main ()
{
cv::Mat h_img1 = cv::imread("images/cameraman.tif",0);
cv::cuda::GpuMat d_img1,d_result1,d_result2;
d_img1.upload(h_img1);
// Original image size
int width= d_img1.cols;
int height = d_img1.size().height;
// Image resizing on the GPU
cv::cuda::resize(d_img1, d_result1, cv::Size(200, 200), 0, 0, cv::INTER_CUBIC); // resize to a fixed 200x200
cv::cuda::resize(d_img1, d_result2, cv::Size(0.5*width, 0.5*height), 0, 0, cv::INTER_LINEAR); // shrink to half size
// INTER_NEAREST - nearest-neighbor interpolation
// INTER_LINEAR - bilinear interpolation (the default)
// INTER_AREA - resampling using pixel-area relation; avoids moire artifacts when shrinking, behaves like INTER_NEAREST when enlarging
// INTER_CUBIC - bicubic interpolation
// (the nearest-neighbor and area modes are shown in the sketch after this program)
cv::Mat h_result1,h_result2;
d_result1.download(h_result1);
d_result2.download(h_result2);
cv::imshow("Original Image ", h_img1);
cv::imshow("Resized Image", h_result1);
cv::imshow("Resized Image 2", h_result2);
cv::imwrite("Resized1.png", h_result1);
cv::imwrite("Resized2.png", h_result2);
cv::waitKey();
return 0;
}
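// The interpolation comments above also list nearest-neighbor and area resampling, which the program does not use.
// The following sketch of those two modes is an added illustration (not part of the original listing).
#include <iostream>
#include "opencv2/opencv.hpp"
int main ()
{
cv::Mat h_img1 = cv::imread("images/cameraman.tif",0);
cv::cuda::GpuMat d_img1, d_result_nn, d_result_area;
d_img1.upload(h_img1);
int width = d_img1.cols;
int height = d_img1.rows;
// Enlarge to twice the size with nearest-neighbor interpolation (fast, but blocky)
cv::cuda::resize(d_img1, d_result_nn, cv::Size(2*width, 2*height), 0, 0, cv::INTER_NEAREST);
// Shrink to half the size with area interpolation (avoids moire artifacts when downscaling)
cv::cuda::resize(d_img1, d_result_area, cv::Size(0.5*width, 0.5*height), 0, 0, cv::INTER_AREA);
cv::Mat h_result_nn, h_result_area;
d_result_nn.download(h_result_nn);
d_result_area.download(h_result_area);
cv::imshow("Nearest-neighbor upscaling", h_result_nn);
cv::imshow("Area downscaling", h_result_area);
cv::waitKey();
return 0;
}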
#include <iostream>
#include "opencv2/opencv.hpp"
int main ()
{
cv::Mat h_img1 = cv::imread("images/cameraman.tif",0);
cv::cuda::GpuMat d_img1,d_result1,d_result2;
d_img1.upload(h_img1);
int cols= d_img1.cols;
int rows = d_img1.size().height;
// Translation: shift 70 pixels along x and 50 pixels along y
cv::Mat trans_mat = (cv::Mat_<double>(2,3) << 1, 0, 70,
0, 1, 50);
cv::cuda::warpAffine(d_img1,d_result1,trans_mat,d_img1.size());
// Rotation
cv::Point2f pt(d_img1.cols/2., d_img1.rows/2.); // center of rotation
cv::Mat r = cv::getRotationMatrix2D(pt, 45, 1.0); // rotate by 45 degrees
cv::cuda::warpAffine(d_img1, d_result2, r, cv::Size(d_img1.cols, d_img1.rows));
cv::Mat h_result1,h_result2;
d_result1.download(h_result1);
d_result2.download(h_result2);
cv::imshow("Original Image ", h_img1);
cv::imshow("Translated Image", h_result1);
cv::imshow("Rotated Image", h_result2);
cv::imwrite("Translated.png", h_result1);
cv::imwrite("Rotated.png", h_result2);
cv::waitKey();
return 0;
}
#include <iostream>
#include "opencv2/opencv.hpp"
int main ()
{
cv::Mat h_img1 = cv::imread("images/cameraman.tif",0);
cv::cuda::GpuMat d_img1,d_result3x3,d_result5x5,d_result7x7;
d_img1.upload(h_img1);
// Pointers to CUDA filters
cv::Ptr<cv::cuda::Filter> filter3x3,filter5x5,filter7x7;
// Create a CUDA box filter with a 3x3 kernel
filter3x3 = cv::cuda::createBoxFilter(CV_8UC1,CV_8UC1,cv::Size(3,3));
// Apply the filter
filter3x3->apply(d_img1, d_result3x3);
filter5x5 = cv::cuda::createBoxFilter(CV_8UC1,CV_8UC1,cv::Size(5,5));
filter5x5->apply(d_img1, d_result5x5);
filter7x7 = cv::cuda::createBoxFilter(CV_8UC1,CV_8UC1,cv::Size(7,7));
filter7x7->apply(d_img1, d_result7x7);
cv::Mat h_result3x3,h_result5x5,h_result7x7;
d_result3x3.download(h_result3x3);
d_result5x5.download(h_result5x5);
d_result7x7.download(h_result7x7);
cv::imshow("Original Image ", h_img1);
cv::imshow("Blurred with kernel size 3x3", h_result3x3);
cv::imshow("Blurred with kernel size 5x5", h_result5x5);
cv::imshow("Blurred with kernel size 7x7", h_result7x7);
cv::imwrite("Blurred3x3.png", h_result3x3);
cv::imwrite("Blurred5x5.png", h_result5x5);
cv::imwrite("Blurred7x7.png", h_result7x7);
cv::waitKey();
return 0;
}
#include <iostream>
#include "opencv2/opencv.hpp"
int main ()
{
cv::Mat h_img1 = cv::imread("images/cameraman.tif",0);
cv::cuda::GpuMat d_img1,d_result3x3,d_result5x5,d_result7x7;
d_img1.upload(h_img1);
// Pointers to CUDA filters
cv::Ptr<cv::cuda::Filter> filter3x3,filter5x5,filter7x7;
// Create a CUDA Gaussian filter with a 3x3 kernel and sigma = 1
filter3x3 = cv::cuda::createGaussianFilter(CV_8UC1,CV_8UC1,cv::Size(3,3),1);
// Apply the filter
filter3x3->apply(d_img1, d_result3x3);
filter5x5 = cv::cuda::createGaussianFilter(CV_8UC1,CV_8UC1,cv::Size(5,5),1);
filter5x5->apply(d_img1, d_result5x5);
filter7x7 = cv::cuda::createGaussianFilter(CV_8UC1,CV_8UC1,cv::Size(7,7),1);
filter7x7->apply(d_img1, d_result7x7);
cv::Mat h_result3x3,h_result5x5,h_result7x7;
d_result3x3.download(h_result3x3);
d_result5x5.download(h_result5x5);
d_result7x7.download(h_result7x7);
cv::imshow("Original Image ", h_img1);
cv::imshow("Blurred with kernel size 3x3", h_result3x3);
cv::imshow("Blurred with kernel size 5x5", h_result5x5);
cv::imshow("Blurred with kernel size 7x7", h_result7x7);
cv::imwrite("gBlurred3x3.png", h_result3x3);
cv::imwrite("gBlurred5x5.png", h_result5x5);
cv::imwrite("gBlurred7x7.png", h_result7x7);
cv::waitKey();
return 0;
}
#include <iostream>
#include "opencv2/opencv.hpp"
int main ()
{
cv::Mat h_img1 = cv::imread("images/saltpepper.png",0);
cv::Mat h_result;
cv::medianBlur(h_img1,h_result,3);
cv::imshow("Original Image ", h_img1);
cv::imshow("Median Blur Result", h_result);
cv::waitKey();
return 0;
}
#include <iostream>
#include "opencv2/opencv.hpp"
int main ()
{
cv::Mat h_img1 = cv::imread("images/blobs.png",0);
cv::cuda::GpuMat d_img1,d_resultx,d_resulty,d_resultxy;
d_img1.upload(h_img1);
cv::Ptr<cv::cuda::Filter> filterx,filtery,filterxy;
// Sobel filters
filterx = cv::cuda::createSobelFilter(CV_8UC1,CV_8UC1,1,0); // x (horizontal) derivative
filterx->apply(d_img1, d_resultx);
filtery = cv::cuda::createSobelFilter(CV_8UC1,CV_8UC1,0,1); // y (vertical) derivative
filtery->apply(d_img1, d_resulty);
cv::cuda::add(d_resultx,d_resulty,d_resultxy); // combine the two gradient images
cv::Mat h_resultx,h_resulty,h_resultxy;
d_resultx.download(h_resultx);
d_resulty.download(h_resulty);
d_resultxy.download(h_resultxy);
cv::imshow("Original Image ", h_img1);
cv::imshow("Sobel-x derivative", h_resultx);
cv::imshow("Sobel-y derivative", h_resulty);
cv::imshow("Sobel-xy derivative", h_resultxy);
cv::imwrite("sobelx.png", h_resultx);
cv::imwrite("sobely.png", h_resulty);
cv::imwrite("sobelxy.png", h_resultxy);
cv::waitKey();
return 0;
}
#include <iostream>
#include "opencv2/opencv.hpp"
int main ()
{
cv::Mat h_img1 = cv::imread("images/blobs.png",0);
cv::cuda::GpuMat d_img1,d_resultx,d_resulty,d_resultxy;
d_img1.upload(h_img1);
cv::Ptr<cv::cuda::Filter> filterx,filtery;
// Create Scharr filters
filterx = cv::cuda::createScharrFilter(CV_8UC1,CV_8UC1,1,0);
filterx->apply(d_img1, d_resultx);
filtery = cv::cuda::createScharrFilter(CV_8UC1,CV_8UC1,0,1);
filtery->apply(d_img1, d_resulty);
cv::cuda::add(d_resultx,d_resulty,d_resultxy);
cv::Mat h_resultx,h_resulty,h_resultxy;
d_resultx.download(h_resultx);
d_resulty.download(h_resulty);
d_resultxy.download(h_resultxy);
cv::imshow("Original Image ", h_img1);
cv::imshow("Scharr-x derivative", h_resultx);
cv::imshow("Scharr-y derivative", h_resulty);
cv::imshow("Scharr-xy derivative", h_resultxy);
cv::imwrite("scharrx.png", h_resultx);
cv::imwrite("scharry.png", h_resulty);
cv::imwrite("scharrxy.png", h_resultxy);
cv::waitKey();
return 0;
}
#include <iostream>
#include "opencv2/opencv.hpp"
int main ()
{
cv::Mat h_img1 = cv::imread("images/blobs.png",0);
cv::cuda::GpuMat d_img1,d_result1,d_result3;
d_img1.upload(h_img1);
cv::Ptr<cv::cuda::Filter> filter1,filter3;
// Create Laplacian filters with aperture sizes 1 and 3
filter1 = cv::cuda::createLaplacianFilter(CV_8UC1,CV_8UC1,1);
filter1->apply(d_img1, d_result1);
filter3 = cv::cuda::createLaplacianFilter(CV_8UC1,CV_8UC1,3);
filter3->apply(d_img1, d_result3);
cv::Mat h_result1,h_result3;
d_result1.download(h_result1);
d_result3.download(h_result3);
cv::imshow("Original Image ", h_img1);
cv::imshow("Laplacian Filter 1", h_result1);
cv::imshow("Laplacian Filter 3", h_result3);
cv::imwrite("laplacian1.png", h_result1);
cv::imwrite("laplacian3.png", h_result3);
cv::waitKey();
return 0;
}
#include <iostream>
#include "opencv2/opencv.hpp"
int main ()
{
cv::Mat h_img1 = cv::imread("images/blobs.png",0);
cv::cuda::GpuMat d_img1,d_resulte,d_resultd,d_resulto, d_resultc;
cv::Mat element = cv::getStructuringElement(cv::MORPH_RECT,cv::Size(5,5)); // 5x5 rectangular structuring element
d_img1.upload(h_img1);
cv::Ptr<cv::cuda::Filter> filtere,filterd,filtero,filterc;
filtere = cv::cuda::createMorphologyFilter(cv::MORPH_ERODE,CV_8UC1,element); // erosion
filtere->apply(d_img1, d_resulte);
filterd = cv::cuda::createMorphologyFilter(cv::MORPH_DILATE,CV_8UC1,element); // dilation
filterd->apply(d_img1, d_resultd);
filtero = cv::cuda::createMorphologyFilter(cv::MORPH_OPEN,CV_8UC1,element); // opening
filtero->apply(d_img1, d_resulto);
filterc = cv::cuda::createMorphologyFilter(cv::MORPH_CLOSE,CV_8UC1,element); // closing
filterc->apply(d_img1, d_resultc);
cv::Mat h_resulte,h_resultd,h_resulto,h_resultc;
d_resulte.download(h_resulte);
d_resultd.download(h_resultd);
d_resulto.download(h_resulto);
d_resultc.download(h_resultc);
cv::imshow("Original Image ", h_img1);
cv::imshow("Erosion", h_resulte);
cv::imshow("Dilation", h_resultd);
cv::imshow("Opening", h_resulto);
cv::imshow("closing", h_resultc);
cv::imwrite("erosion7.png", h_resulte);
cv::imwrite("dilation7.png", h_resultd);
cv::imwrite("opening7.png", h_resulto);
cv::imwrite("closing7.png", h_resultc);
cv::waitKey();
return 0;
}
#include <iostream>
#include "opencv2/opencv.hpp"
using namespace cv;
using namespace std;
int main( int argc, char** argv )
{
// Open the webcam
VideoCapture cap(0); //capture the video from web cam
if ( !cap.isOpened() )
{
cout << "Cannot open the web cam" << endl;
return -1;
}
while (true)
{
Mat frame;
// read a frame from the webcam
bool flag = cap.read(frame);
if (!flag)
{
cout << "Cannot read a frame from webcam" << endl;
break;
}
// Define device (GPU) matrices
cuda::GpuMat d_frame, d_frame_hsv,d_intermediate,d_result;
cuda::GpuMat d_frame_shsv[3];
cuda::GpuMat d_thresc[3];
Mat h_result;
d_frame.upload(frame);
// Convert BGR to HSV on the GPU
cuda::cvtColor(d_frame, d_frame_hsv, COLOR_BGR2HSV);
// Split the HSV image into its channels
cuda::split(d_frame_hsv, d_frame_shsv);
// Binary-threshold each of the three HSV channels (threshold value, max value)
cuda::threshold(d_frame_shsv[0], d_thresc[0], 110, 130, THRESH_BINARY);
cuda::threshold(d_frame_shsv[1], d_thresc[1], 50, 255, THRESH_BINARY);
cuda::threshold(d_frame_shsv[2], d_thresc[2], 50, 255, THRESH_BINARY);
// AND the three thresholded channels together
cv::cuda::bitwise_and(d_thresc[0], d_thresc[1],d_intermediate);
cv::cuda::bitwise_and(d_intermediate, d_thresc[2], d_result);
d_result.download(h_result);
imshow("Thresholded Image", h_result);
imshow("Original", frame);
if (waitKey(1) == 'q')
{
break;
}
}
return 0;
}
#include <iostream>
#include "opencv2/opencv.hpp"
using namespace std;
using namespace cv;
using namespace cv::cuda;
int main()
{
Mat h_image = imread("images/drawing.JPG",0);
if (h_image.empty())
{
cout << "can not open image"<< endl;
return -1;
}
GpuMat d_edge,d_image;
Mat h_edge;
d_image.upload(h_image);
// Create the CUDA Canny edge detector (low threshold 2.0, high threshold 100.0)
cv::Ptr<cv::cuda::CannyEdgeDetector> canny_edge = cv::cuda::createCannyEdgeDetector(2.0, 100.0, 3, false);
// Run edge detection on the GPU
canny_edge->detect(d_image, d_edge);
d_edge.download(h_edge);
imshow("source", h_image);
imshow("detected edges", h_edge);
waitKey(0);
return 0;
}
#include <iostream>
#include <vector>
#include "opencv2/opencv.hpp"
using namespace std;
using namespace cv;
using namespace cv::cuda;
int main()
{
Mat h_image = imread("images/drawing.JPG",0);
if (h_image.empty())
{
cout << "can not open image"<< endl;
return -1;
}
Mat h_edge;
// Edge detection must be run first
cv::Canny(h_image, h_edge, 100, 200, 3);
Mat h_imagec;
cv::cvtColor(h_edge, h_imagec, COLOR_GRAY2BGR); // convert to 3 channels so colored lines can be drawn
Mat h_imageg = h_imagec.clone();
vector<Vec4i> h_lines;
// Time the Hough line transform on the CPU, HoughLinesP()
{
const int64 start = getTickCount(); // start timer
HoughLinesP(h_edge, h_lines, 1, CV_PI / 180, 50, 60, 5);
const double time_elapsed = (getTickCount() - start) / getTickFrequency();
cout << "CPU Time : " << time_elapsed * 1000 << " ms" << endl;
cout << "CPU FPS : " << (1/time_elapsed) << endl;
}
for (size_t i = 0; i < h_lines.size(); ++i)
{
// Draw the detected lines on the image
Vec4i line_point = h_lines[i];
line(h_imagec, Point(line_point[0], line_point[1]), Point(line_point[2], line_point[3]), Scalar(0, 0, 255), 2, LINE_AA);
}
GpuMat d_edge, d_lines; // detection results on the GPU
d_edge.upload(h_edge); // upload the edge image
{
const int64 start = getTickCount();
Ptr<cuda::HoughSegmentDetector> hough = cuda::createHoughSegmentDetector(1.0f, (float) (CV_PI / 180.0f), 50, 5);
hough->detect(d_edge, d_lines);
const double time_elapsed = (getTickCount() - start) / getTickFrequency();
cout << "GPU Time : " << time_elapsed * 1000 << " ms" << endl;
cout << "GPU FPS : " << (1/time_elapsed) << endl;
}
vector<Vec4i> lines_g;
if (!d_lines.empty())
{
lines_g.resize(d_lines.cols);
Mat h_lines(1, d_lines.cols, CV_32SC4, &lines_g[0]);
d_lines.download(h_lines); // copy the detection results to the host
}
for (size_t i = 0; i < lines_g.size(); ++i)
{
// Draw the detected lines on the image
Vec4i line_point = lines_g[i];
line(h_imageg, Point(line_point[0], line_point[1]), Point(line_point[2], line_point[3]), Scalar(0, 0, 255), 2, LINE_AA);
}
imshow("source", h_image);
imshow("detected lines [CPU]", h_imagec);
imshow("detected lines [GPU]", h_imageg);
imwrite("hough_source.png", h_image);
imwrite("hough_cpu_line.png", h_imagec);
imwrite("hough_gpu_line.png", h_imageg);
waitKey(0);
return 0;
}
#include <iostream>
#include "opencv2/opencv.hpp"
using namespace cv;
using namespace std;
int main()
{
Mat h_image = imread( "images/drawing.JPG", 0 );
// Create the OpenCV CUDA FAST feature detector
cv::Ptr<cv::cuda::FastFeatureDetector> detector = cv::cuda::FastFeatureDetector::create(100,true,2);
std::vector<cv::KeyPoint> keypoints;// 特征点不用拷贝????
// 上传图像
cv::cuda::GpuMat d_image;
d_image.upload(h_image);
// Run detection
detector->detect(d_image, keypoints);
// Draw the detected keypoints
cv::drawKeypoints(h_image,keypoints,h_image);
//Show detected keypoints
imshow("Final Result", h_image );
waitKey(0);
return 0;
}
#include <iostream>
#include "opencv2/opencv.hpp"
using namespace cv;
using namespace std;
int main()
{
Mat h_image = imread( "images/drawing.JPG", 0 );
// OpenCV CUDA ORB feature detection
cv::Ptr<cv::cuda::ORB> detector = cv::cuda::ORB::create();
std::vector<cv::KeyPoint> keypoints;
cv::cuda::GpuMat d_image;
d_image.upload(h_image);
detector->detect(d_image, keypoints);
cv::drawKeypoints(h_image,keypoints,h_image);
imshow("Final Result", h_image );
waitKey(0);
return 0;
}
#include <iostream>
#include <vector>
#include "opencv2/opencv.hpp"
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/xfeatures2d.hpp"
#include "opencv2/xfeatures2d/nonfree.hpp"
#include "opencv2/core/cuda.hpp"
#include "opencv2/cudaarithm.hpp"
#include "opencv2/cudafeatures2d.hpp"
#include "opencv2/xfeatures2d/cuda.hpp"
using namespace cv;
using namespace cv::xfeatures2d;
using namespace std;
int main( int argc, char** argv )
{
// Host (CPU) data
Mat h_object_image = imread( "images/object1.jpg", 0 ); // the object (query) image
Mat h_scene_image = imread( "images/scene1.jpg", 0 ); // the scene image
// Device (GPU) data
cuda::GpuMat d_object_image;
cuda::GpuMat d_scene_image;
cuda::GpuMat d_keypoints_scene, d_keypoints_object; // keypoints on the GPU
vector< KeyPoint > h_keypoints_scene, h_keypoints_object; // keypoints on the CPU
cuda::GpuMat d_descriptors_scene, d_descriptors_object; // descriptors on the GPU
// Upload the images from the CPU to the GPU
d_object_image.upload(h_object_image);
d_scene_image.upload(h_scene_image);
// SURF_CUDA detector with a Hessian threshold of 100
cuda::SURF_CUDA surf(100);
// Detect keypoints and compute their descriptors for both images
surf( d_object_image, cuda::GpuMat(), d_keypoints_object, d_descriptors_object );
surf( d_scene_image, cuda::GpuMat(), d_keypoints_scene, d_descriptors_scene );
// Brute-force descriptor matcher
Ptr< cuda::DescriptorMatcher > matcher = cuda::DescriptorMatcher::createBFMatcher();
vector< vector< DMatch> > d_matches;
matcher->knnMatch(d_descriptors_object, d_descriptors_scene, d_matches, 2);// 最近邻 2个
// Download the keypoints to the host
surf.downloadKeypoints(d_keypoints_scene, h_keypoints_scene);
surf.downloadKeypoints(d_keypoints_object, h_keypoints_object);
std::vector< DMatch > good_matches;
for (int k = 0; k < std::min(h_keypoints_object.size()-1, d_matches.size()); k++)
{
if ( (d_matches[k][0].distance < 0.6*(d_matches[k][1].distance)) &&
((int)d_matches[k].size() <= 2 && (int)d_matches[k].size()>0) )
{
// ratio test: best match distance < 0.6 * second-best match distance
good_matches.push_back(d_matches[k][0]);
}
}
std::cout << "size:" <<good_matches.size();
// Draw the good matches
Mat h_image_result;
drawMatches( h_object_image, h_keypoints_object, h_scene_image, h_keypoints_scene,
good_matches, h_image_result, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::DEFAULT );
// Collect the 2D pixel coordinates of the matched keypoints
std::vector<Point2f> object;
std::vector<Point2f> scene;
for (int i = 0; i < good_matches.size(); i++)
{
object.push_back(h_keypoints_object[good_matches[i].queryIdx].pt);
scene.push_back(h_keypoints_scene[good_matches[i].trainIdx].pt);
}
// Estimate the homography between the object and scene planes
Mat Homo = findHomography(object, scene, RANSAC);
std::vector<Point2f> corners(4); // 图像四个角点
std::vector<Point2f> scene_corners(4);
corners[0] = Point(0, 0);// 最上面
corners[1] = Point(h_object_image.cols, 0);// 目标图像 四点 单应变换后 计算在 场景图像中的位置
corners[2] = Point(h_object_image.cols, h_object_image.rows);
corners[3] = Point(0, h_object_image.rows);
perspectiveTransform(corners, scene_corners, Homo);// 目标点 ====> 单应变换 ====> 场景中的位置
// 画出四条边界线=====
line(h_image_result, scene_corners[0] + Point2f(h_object_image.cols, 0),scene_corners[1] + Point2f(h_object_image.cols, 0), Scalar(255, 0, 0), 4);
line(h_image_result, scene_corners[1] + Point2f(h_object_image.cols, 0),scene_corners[2] + Point2f(h_object_image.cols, 0),Scalar(255, 0, 0), 4);
line(h_image_result, scene_corners[2] + Point2f(h_object_image.cols, 0),scene_corners[3] + Point2f(h_object_image.cols, 0),Scalar(255, 0, 0), 4);
line(h_image_result, scene_corners[3] + Point2f(h_object_image.cols, 0),scene_corners[0] + Point2f(h_object_image.cols, 0),Scalar(255, 0, 0), 4);
imshow("Good Matches & Object detection", h_image_result);
waitKey(0);
return 0;
}
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/cudaobjdetect.hpp"
#include <iostream>
#include <vector>
using namespace std;
using namespace cv;
int main( )
{
Mat h_image;
h_image = imread("images/lena_color_512.tif", 0);
// OpenCV CUDA cascade classifier for face detection
Ptr<cuda::CascadeClassifier> cascade = cuda::CascadeClassifier::create("haarcascade_frontalface_alt2.xml");
cuda::GpuMat d_image;
cuda::GpuMat d_buf;
d_image.upload(h_image);
//cascadeGPU->setMinNeighbors(0);
//cascadeGPU->setScaleFactor(1.01);
cascade->detectMultiScale(d_image, d_buf);// 多尺度检测
// 转换检测结果
std::vector<Rect> detections;
cascade->convert(d_buf, detections);
if (detections.empty())
std::cout << "No detection." << std::endl;
// Convert to a color image so colored rectangles can be drawn
cvtColor(h_image,h_image,COLOR_GRAY2BGR);
for(int i = 0; i < detections.size(); ++i)
{
// Draw a rectangle around each detection
rectangle(h_image, detections[i], Scalar(0,255,255), 5);
}
imshow("Result image", h_image);
waitKey(0);
return 0;
}
#include <iostream>
#include "opencv2/opencv.hpp"
#include "opencv2/cudaobjdetect.hpp"
using namespace cv;
using namespace std;
int main()
{
VideoCapture cap(0); // open the webcam
if (!cap.isOpened())
{
cerr << "Can not open video source";
return -1;
}
std::vector<cv::Rect> h_found;// cpu 目标框
// OPENCV gpu 级联回归 人脸检测
cv::Ptr<cv::cuda::CascadeClassifier> cascade =
cv::cuda::CascadeClassifier::create("haarcascade_frontalface_alt2.xml");
cv::cuda::GpuMat d_frame, d_gray, d_found;
while(1)
{
Mat frame;
if ( !cap.read(frame) )
{
cerr << "Can not read frame from webcam";
return -1;
}
d_frame.upload(frame); // upload the frame to the GPU
// Convert to grayscale
cv::cuda::cvtColor(d_frame, d_gray, cv::COLOR_BGR2GRAY);
// Multi-scale face detection
cascade->detectMultiScale(d_gray, d_found);
// Convert the detection results
cascade->convert(d_found, h_found);
for(int i = 0; i < h_found.size(); ++i)
{
// Draw a rectangle around each detection
rectangle(frame, h_found[i], Scalar(0,255,255), 5);
}
imshow("Result", frame);
if (waitKey(1) == 'q') {
break;
}
}
return 0;
}
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/cudaobjdetect.hpp"
#include <iostream>
#include <vector>
using namespace std;
using namespace cv;
int main( )
{
Mat h_image;
h_image = imread("images/lena_color_512.tif", 0); // 灰度图格式
// OpenCV CUDA cascade classifier for eye detection
Ptr<cuda::CascadeClassifier> cascade = cuda::CascadeClassifier::create("haarcascade_eye.xml");
cuda::GpuMat d_image;
cuda::GpuMat d_buf;
d_image.upload(h_image);
//cascadeGPU->setMinNeighbors(0);
//cascadeGPU->setScaleFactor(1.01);
cascade->detectMultiScale(d_image, d_buf);
std::vector<Rect> detections;
cascade->convert(d_buf, detections);// 转换结果
if (detections.empty())
std::cout << "No detection." << std::endl;
// Convert to a color image so colored rectangles can be drawn
cvtColor(h_image,h_image,COLOR_GRAY2BGR);
for(int i = 0; i < detections.size(); ++i)
{
// Draw a rectangle around each detection
rectangle(h_image, detections[i], Scalar(0,255,255), 5);
}
imshow("Result image", h_image);
waitKey(0);
return 0;
}
#include <iostream>
#include "opencv2/opencv.hpp"
using namespace std;
using namespace cv;
using namespace cv::cuda;
int main()
{
VideoCapture cap("abc.avi");
if (!cap.isOpened())
{
cerr << "can not open camera or video file" << endl;
return -1;
}
Mat frame;
cap.read(frame);
GpuMat d_frame;
d_frame.upload(frame);
// MOG: the Gaussian Mixture-based Background/Foreground Segmentation Algorithm
Ptr<BackgroundSubtractor> mog = cuda::createBackgroundSubtractorMOG();
GpuMat d_fgmask,d_fgimage,d_bgimage;
Mat h_fgmask,h_fgimage,h_bgimage;
mog->apply(d_frame, d_fgmask, 0.01);
while(1)
{
cap.read(frame);
if (frame.empty())
break;
d_frame.upload(frame); // upload the frame to the GPU
int64 start = cv::getTickCount(); // start timer
mog->apply(d_frame, d_fgmask, 0.01); // foreground detection
mog->getBackgroundImage(d_bgimage); // retrieve the estimated static background
double fps = cv::getTickFrequency() / (cv::getTickCount() - start);
std::cout << "FPS : " << fps << std::endl;
d_fgimage.create(d_frame.size(), d_frame.type());
d_fgimage.setTo(Scalar::all(0));
d_frame.copyTo(d_fgimage, d_fgmask); // copy frame pixels where the foreground mask is set
d_fgmask.download(h_fgmask);
d_fgimage.download(h_fgimage);
d_bgimage.download(h_bgimage);
imshow("image", frame);
imshow("foreground mask", h_fgmask);// 前景
imshow("foreground image", h_fgimage);
imshow("mean background image", h_bgimage);// 背景
if (waitKey(1) == 'q')
break;
}
return 0;
}
#include <iostream>
#include "opencv2/opencv.hpp"
#include "opencv2/core.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/cudabgsegm.hpp"
#include "opencv2/cudalegacy.hpp"
#include "opencv2/video.hpp"
#include "opencv2/highgui.hpp"
using namespace std;
using namespace cv;
using namespace cv::cuda;
int main()
{
VideoCapture cap("abc.avi");
if (!cap.isOpened())
{
cerr << "can not open video file" << endl;
return -1;
}
Mat frame;
cap.read(frame);
GpuMat d_frame;
d_frame.upload(frame);
// GMG background subtraction (40 frames used for initialization)
Ptr<BackgroundSubtractor> gmg = cuda::createBackgroundSubtractorGMG(40);
GpuMat d_fgmask,d_fgimage,d_bgimage;
Mat h_fgmask,h_fgimage,h_bgimage;
gmg->apply(d_frame, d_fgmask);
while(1)
{
cap.read(frame);
if (frame.empty())
break;
d_frame.upload(frame);
int64 start = cv::getTickCount();
gmg->apply(d_frame, d_fgmask, 0.01);
double fps = cv::getTickFrequency() / (cv::getTickCount() - start);
std::cout << "FPS : " << fps << std::endl;
d_fgimage.create(d_frame.size(), d_frame.type());
d_fgimage.setTo(Scalar::all(0));
d_frame.copyTo(d_fgimage, d_fgmask);
d_fgmask.download(h_fgmask);
d_fgimage.download(h_fgimage);
imshow("image", frame);
imshow("foreground mask", h_fgmask);
imshow("foreground image", h_fgimage);
if (waitKey(30) == 'q')
break;
}
return 0;
}