cv::Mat backimg = cv::imread("../imgs/3789.png");
cv::cvtColor(backimg, backimg, cv::COLOR_BGR2GRAY);
void resize( InputArray src, OutputArray dst,
Size dsize, double fx = 0, double fy = 0,
int interpolation = INTER_LINEAR );
In OpenCV the X axis is the horizontal axis (the column index) and the Y axis is the vertical axis (the row index): an image is Y rows by X columns, so matrix access is (row, col) while Point and Size take (x, y).
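A minimal resize sketch under these conventions (variable names are illustrative): pass an explicit dsize of (width, height), or leave it empty and let fx/fy define the scale.
cv::Mat small_img, half_img;
cv::resize(backimg, small_img, cv::Size(640, 480)); // dsize = (width, height) = (cols, rows)
cv::resize(backimg, half_img, cv::Size(), 0.5, 0.5, cv::INTER_LINEAR); // scale by fx/fy instead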
void flip(InputArray src, OutputArray dst, int flipCode);
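The flipCode argument selects the axis; a short sketch (reusing backimg from above):
cv::Mat flipped;
cv::flip(backimg, flipped, 0); // flip around the x-axis (upside down)
cv::flip(backimg, flipped, 1); // flip around the y-axis (mirror); any positive value works
cv::flip(backimg, flipped, -1); // flip around both axes (180-degree rotation)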
enum
{
CV_THRESH_BINARY =0, /* value = value > threshold ? max_value : 0 */
CV_THRESH_BINARY_INV =1, /* value = value > threshold ? 0 : max_value */
CV_THRESH_TRUNC =2, /* value = value > threshold ? threshold : value */
CV_THRESH_TOZERO =3, /* value = value > threshold ? value : 0 */
CV_THRESH_TOZERO_INV =4, /* value = value > threshold ? 0 : value */
CV_THRESH_OTSU =8 /* use Otsu algorithm to choose the optimal threshold value; combine the flag with one of the above CV_THRESH_* values */
}
cv::threshold(imag, result, 30, 255, CV_THRESH_BINARY);
When the image histogram has a clearly unimodal shape, the following method can be used to select the binarization threshold.
// Unimodal (triangle) threshold method
cv::Mat Thresh_Unimodal(cv::Mat &src, int& idx)
{
cv::Mat result = cv::Mat::zeros(src.size(), CV_8UC1);
// Build the histogram
cv::Mat hist = cv::Mat::zeros(1, 256, CV_32FC1);
for (int i = 0; i < src.rows; ++i)
{
for (int j = 0; j < src.cols; ++j)
{
hist.at<float>(0, src.at<uchar>(i, j))++;
}
}
hist.at<float>(0, 255) = 0; // ignore the two extreme bins
hist.at<float>(0, 0) = 0;
// Find the position of the histogram peak
float max = 0;
int maxidx = 0;
for (int i = 0; i < 256; ++i)
{
if (hist.at<float>(0, i) > max)
{
max = hist.at<float>(0, i);
maxidx = i;
}
}
// Determine which side the peak is on: true = left, false = right
bool lr = maxidx < 127;
float maxd = 0;
int maxdidx = 0;
// Peak on the left: measure distances to the line from (maxidx, max) to (255, 0)
if (lr)
{
float A = float(-max);
float B = float(maxidx - 255);
float C = float(max * 255);
for (int i = maxidx + 1; i < 256; ++i)
{
float x0 = float(i);
float y0 = hist.at<float>(0, i);
float d = std::abs(A * x0 + B * y0 + C) / std::sqrt(A * A + B * B);
if (d > maxd)
{
maxd = d;
maxdidx = i;
}
}
}
// Peak on the right: measure distances to the line from (maxidx, max) to (0, 0)
else {
float A = float(-max);
float B = float(maxidx);
float C = 0.0f;
for (int i = 0; i < maxidx; ++i)
{
float x0 = float(i);
float y0 = hist.at<float>(0, i);
float d = std::abs(A * x0 + B * y0 + C) / std::sqrt(A * A + B * B);
if (d > maxd)
{
maxd = d;
maxdidx = i;
}
}
}
// Binarize
result.setTo(255, src > maxdidx);
idx = maxdidx;
return result;
}
cv::subtract(backimg, img_mat, sub_img);
void cv::line ( InputOutputArray img,
Point pt1,
Point pt2,
const Scalar & color,
int thickness = 1,
int lineType = LINE_8,
int shift = 0
)
void cv::arrowedLine ( InputOutputArray img,
Point pt1,
Point pt2,
const Scalar & color,
int thickness = 1,
int line_type = 8,
int shift = 0,
double tipLength = 0.1
)
void cv::circle ( InputOutputArray img,
Point center,
int radius,
const Scalar & color,
int thickness = 1,
int lineType = LINE_8,
int shift = 0
)
void cv::rectangle ( InputOutputArray img,
Point pt1,
Point pt2,
const Scalar & color,
int thickness = 1,
int lineType = LINE_8,
int shift = 0
)
void cv::polylines ( Mat & img,
const Point *const * pts,
const int * npts,
int ncontours,
bool isClosed,
const Scalar & color,
int thickness = 1,
int lineType = LINE_8,
int shift = 0
)
void cv::ellipse ( InputOutputArray img,
Point center,
Size axes,
double angle,
double startAngle,
double endAngle,
const Scalar & color,
int thickness = 1,
int lineType = LINE_8,
int shift = 0
)
void cv::drawMarker ( Mat & img,
Point position,
const Scalar & color,
int markerType = MARKER_CROSS,
int markerSize = 20,
int thickness = 1,
int line_type = 8
)
void cv::putText ( InputOutputArray img,
const String & text,
Point org,
int fontFace,
double fontScale,
Scalar color,
int thickness = 1,
int lineType = LINE_8,
bool bottomLeftOrigin = false
)
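A compact sketch exercising the primitives above (the canvas and geometry are illustrative):
cv::Mat canvas = cv::Mat::zeros(400, 400, CV_8UC3);
cv::line(canvas, cv::Point(10, 10), cv::Point(390, 10), cv::Scalar(0, 255, 0), 2);
cv::arrowedLine(canvas, cv::Point(10, 40), cv::Point(390, 40), cv::Scalar(0, 255, 255), 2);
cv::circle(canvas, cv::Point(200, 200), 60, cv::Scalar(255, 0, 0), -1); // thickness = -1 fills the shape
cv::rectangle(canvas, cv::Point(50, 300), cv::Point(350, 380), cv::Scalar(0, 0, 255), 2);
cv::ellipse(canvas, cv::Point(200, 200), cv::Size(120, 80), 30, 0, 360, cv::Scalar(255, 255, 0), 1);
cv::drawMarker(canvas, cv::Point(60, 60), cv::Scalar(255, 255, 255), cv::MARKER_CROSS, 20, 1);
cv::putText(canvas, "demo", cv::Point(20, 390), cv::FONT_HERSHEY_SIMPLEX, 1.0, cv::Scalar(255, 255, 255), 2);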
void fillConvexPoly(InputOutputArray img, InputArray points,
const Scalar& color, int lineType = LINE_8,
int shift = 0);
void fillPoly(InputOutputArray img, InputArrayOfArrays pts,
const Scalar& color, int lineType = LINE_8, int shift = 0,
Point offset = Point() );
// Draw a set of polygons
fillPoly(result, pic, Scalar(0, 0, 255), 16, 0);
// Draw a single convex polygon
fillConvexPoly(result2, points2, Scalar(0, 0, 255), 16, 0);
Note that when fillPoly draws several polygons at once, any region where two polygons overlap is left unfilled, keeping its original content; picture a flag being toggled from false to true and back to false.
Beyond that, the two fill functions differ in principle. One difference is that fillConvexPoly draws a single polygon while fillPoly can draw several;
the more important one is that fillConvexPoly only handles convex polygons, whereas fillPoly can draw arbitrary polygons.
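A small demo of the overlap behavior described above (geometry illustrative): two overlapping squares passed in one fillPoly call leave their intersection unfilled.
cv::Mat demo = cv::Mat::zeros(200, 200, CV_8UC3);
std::vector<std::vector<cv::Point>> polys{
{ {20, 20}, {120, 20}, {120, 120}, {20, 120} },
{ {80, 80}, {180, 80}, {180, 180}, {80, 180} }
};
cv::fillPoly(demo, polys, cv::Scalar(0, 0, 255)); // the 40x40 overlap stays black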
void drawMarker(InputOutputArray img, Point position, const Scalar& color,
int markerType = MARKER_CROSS, int markerSize=20, int thickness=1,
int line_type=8);
drawMarker(result, Point(src.cols / 2, src.rows / 2), Scalar(0, 0, 255), MARKER_TILTED_CROSS, 200, 5, 16);
MARKER_CROSS is a crosshair, MARKER_TILTED_CROSS a 45-degree cross, MARKER_STAR a star, MARKER_DIAMOND a diamond, MARKER_SQUARE a square, MARKER_TRIANGLE_UP an upward-pointing triangle, and MARKER_TRIANGLE_DOWN a downward-pointing triangle.
void minMaxIdx(InputArray src, double* minVal, double* maxVal = 0,
int* minIdx = 0, int* maxIdx = 0, InputArray mask = noArray());
void minMaxLoc(InputArray src, CV_OUT double* minVal,
CV_OUT double* maxVal = 0, CV_OUT Point* minLoc = 0,
CV_OUT Point* maxLoc = 0, InputArray mask = noArray());
void normalize( InputArray src, OutputArray dst, double alpha = 1, double beta = 0,
int norm_type = NORM_L2, int dtype = -1, InputArray mask = noArray());
The meaning of the third parameter alpha and the fourth parameter beta depends on the normalization type: for NORM_MINMAX they are the two bounds of the output range, while for NORM_L1, NORM_L2 and NORM_INF alpha is the target norm value and beta is unused.
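A short sketch of the two interpretations (input values illustrative):
cv::Mat m = (cv::Mat_<float>(1, 3) << 1, 2, 3), out;
cv::normalize(m, out, 0, 255, cv::NORM_MINMAX); // out = [0, 127.5, 255]; alpha/beta are range bounds
cv::normalize(m, out, 1, 0, cv::NORM_L1); // out = [1/6, 2/6, 3/6]; alpha is the L1 norm, beta ignored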
const int channels[] = { 0 };
cv::Mat hist; // output histogram
int dims = 1; // histogram dimensionality
const int histSize[] = { 256 }; // number of bins per dimension
// Value range for each dimension (the upper bound is exclusive)
float pranges[] = { 0, 256 };
const float* ranges[] = { pranges };
calcHist(&sub_img, 1, channels, cv::Mat(), hist, dims, histSize, ranges, true, false); // compute the histogram
// Histogram drawing, method one
int scale = 2;
int hist_height = 256;
cv::Mat hist_img = cv::Mat::zeros(hist_height, 256 * scale, CV_8UC3); // black 8-bit 3-channel canvas, 256 high and 256*2 wide
double max_val;
minMaxLoc(hist, 0, &max_val, 0, 0); // find the largest bin count
// Scale the bin counts into the image height while iterating over the histogram data
for (int i = 0; i < 256; i++)
{
float bin_val = hist.at<float>(i); // bin value (note hist stores float)
int intensity = cvRound(bin_val*hist_height / max_val); // bar height
rectangle(hist_img, cv::Point(i*scale, hist_height - 1), cv::Point((i + 1)*scale - 1, hist_height - intensity), cv::Scalar(255, 255, 255)); // draw one bar
}
// Histogram drawing, method two
int hist_h = 300; // height of the histogram image
int hist_w = 512; // width of the histogram image
int bin_w = hist_w / histSize[0]; // width of one bin
cv::Mat histImage(hist_h, hist_w, CV_8UC3, cv::Scalar(0, 0, 0)); // canvas for the histogram
// Normalize the histogram into the canvas height, then draw it
normalize(hist, hist, 0, hist_h, cv::NORM_MINMAX, -1, cv::Mat());
for (int i = 1; i < histSize[0]; i++)
{
cv::line(histImage, cv::Point((i - 1) * bin_w, hist_h - cvRound(hist.at<float>(i - 1))),
cv::Point((i)*bin_w, hist_h - cvRound(hist.at<float>(i))), cv::Scalar(255, 255, 255), 2, 8, 0);
}
The histogram of an image gives a quick read on its brightness and quality.
Histogram equalization transforms the image so that its histogram becomes roughly uniform, which enhances contrast. For discrete image data the most common approach uses the cumulative probability distribution: first count how many pixels take each gray value 0-255; divide each count by the total pixel count to get the probability of that value; accumulate these probabilities starting from gray value 0 until the sum reaches 1; the equalized gray value is then the cumulative probability of the original gray value times 255.
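A sketch of that mapping for an 8-bit single-channel image (the function name is illustrative; cv::equalizeHist below is the library equivalent):
cv::Mat manualEqualize(const cv::Mat &gray)
{
CV_Assert(gray.type() == CV_8UC1);
// Count pixels per gray level
int hist[256] = { 0 };
for (int i = 0; i < gray.rows; ++i)
for (int j = 0; j < gray.cols; ++j)
hist[gray.at<uchar>(i, j)]++;
// Accumulate probabilities and build the lookup table: out = round(CDF(in) * 255)
double total = double(gray.rows) * gray.cols, cdf = 0.0;
uchar lut[256];
for (int v = 0; v < 256; ++v) {
cdf += hist[v] / total;
lut[v] = cv::saturate_cast<uchar>(cvRound(cdf * 255));
}
cv::Mat result(gray.size(), CV_8UC1);
for (int i = 0; i < gray.rows; ++i)
for (int j = 0; j < gray.cols; ++j)
result.at<uchar>(i, j) = lut[gray.at<uchar>(i, j)];
return result;
}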
equalizeHist(sub_img, equal_gray);
OpenCV uses the BGR color order: the first channel is Blue, the second is Green, and the third is Red.
Split:
void split(InputArray m, OutputArrayOfArrays mv);
cv::split(src, channels);
Merge:
void merge(const Mat* mv, size_t count, OutputArray dst);
void merge(InputArrayOfArrays mv, OutputArray dst);
cv::merge(channels, result);
int img_equalize(cv::Mat &src_img){
cv::Mat imageRGB[3];
cv::split(src_img, imageRGB);
// Equalize each channel independently
for (int i = 0; i < 3; i++){
cv::equalizeHist(imageRGB[i], imageRGB[i]);
}
cv::merge(imageRGB, 3, src_img);
return 0;
}
cv::Mat img_enhance(cv::Mat ori_img){
int height = ori_img.rows;
int width = ori_img.cols;
cv::Mat dst = cv::Mat::zeros(ori_img.size(), ori_img.type());
float alpha = 3; // gain (contrast)
float beta = 1.5; // bias (brightness)
for (int row = 0; row < height; row++) {
for (int col = 0; col < width; col++) {
int b = ori_img.at<cv::Vec3b>(row, col)[0];
int g = ori_img.at<cv::Vec3b>(row, col)[1];
int r = ori_img.at<cv::Vec3b>(row, col)[2];
dst.at<cv::Vec3b>(row, col)[0] = cv::saturate_cast<uchar>(b * alpha + beta);
dst.at<cv::Vec3b>(row, col)[1] = cv::saturate_cast<uchar>(g * alpha + beta);
dst.at<cv::Vec3b>(row, col)[2] = cv::saturate_cast<uchar>(r * alpha + beta);
}
}
normalize(dst, dst, 0, 255, NORM_MINMAX);
convertScaleAbs(dst, dst);
return dst;
}
cv::Mat img_enhance_log(cv::Mat ori_img){
int height = ori_img.rows;
int width = ori_img.cols;
// Accumulate the log transform in float: writing log(1+v) (at most about 5.5)
// straight into an 8-bit Mat would quantize it to 0..5 before normalization.
cv::Mat dst = cv::Mat::zeros(ori_img.size(), CV_32FC3);
for (int row = 0; row < height; row++) {
for (int col = 0; col < width; col++) {
dst.at<cv::Vec3f>(row, col)[0] = std::log(1.0f + ori_img.at<cv::Vec3b>(row, col)[0]);
dst.at<cv::Vec3f>(row, col)[1] = std::log(1.0f + ori_img.at<cv::Vec3b>(row, col)[1]);
dst.at<cv::Vec3f>(row, col)[2] = std::log(1.0f + ori_img.at<cv::Vec3b>(row, col)[2]);
}
}
normalize(dst, dst, 0, 255, NORM_MINMAX);
convertScaleAbs(dst, dst);
return dst;
}
cv::Mat img_enhance_gamma(cv::Mat ori_img){
Mat imageGamma(ori_img.size(), CV_32FC3); // cubing each channel corresponds to a gamma of 3
for (int i = 0; i < ori_img.rows; i++)
{
for (int j = 0; j < ori_img.cols; j++)
{
imageGamma.at<Vec3f>(i, j)[0] = (ori_img.at<Vec3b>(i, j)[0])*(ori_img.at<Vec3b>(i, j)[0])*(ori_img.at<Vec3b>(i, j)[0]);
imageGamma.at<Vec3f>(i, j)[1] = (ori_img.at<Vec3b>(i, j)[1])*(ori_img.at<Vec3b>(i, j)[1])*(ori_img.at<Vec3b>(i, j)[1]);
imageGamma.at<Vec3f>(i, j)[2] = (ori_img.at<Vec3b>(i, j)[2])*(ori_img.at<Vec3b>(i, j)[2])*(ori_img.at<Vec3b>(i, j)[2]);
}
}
// Normalize to 0~255
normalize(imageGamma, imageGamma, 0, 255, NORM_MINMAX);
// Convert to an 8-bit image for display
convertScaleAbs(imageGamma, imageGamma);
return imageGamma;
}
int img_laplace(cv::Mat &ori_img){
// Laplacian sharpening kernel: center 5 with four -1 neighbors
cv::Mat kernel = (Mat_<float>(3, 3) << 0, -1, 0, -1, 5, -1, 0, -1, 0);
filter2D(ori_img, ori_img, CV_8UC3, kernel);
return 0;
}
Canny(grayimg, img_canny, 150, 100, 3);
void morphologyEx( InputArray src, OutputArray dst,
int op, InputArray kernel,
Point anchor = Point(-1,-1), int iterations = 1,
int borderType = BORDER_CONSTANT,
const Scalar& borderValue = morphologyDefaultBorderValue() );
cv::Mat kernel = getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3)); // create the structuring element
cv::erode(sub_img, erode_mat, kernel);
cv::dilate(sub_img, dilate_mat, kernel);
// Morphology: opening
morphologyEx(sub_img, morph_open_mat, cv::MORPH_OPEN, kernel, cv::Point(-1, -1));
// Morphology: closing
morphologyEx(sub_img, morph_close_mat, cv::MORPH_CLOSE, kernel, cv::Point(-1, -1));
// Black hat: the closed image minus the original, highlighting regions darker than their surroundings
cv::morphologyEx(test, result, MORPH_BLACKHAT, kernel);
// Top hat: the original minus the opened image, emphasizing small structures brighter than their surroundings (fine cracks, local highlights)
cv::morphologyEx(test, result, MORPH_TOPHAT, kernel);
// Morphological gradient: the dilated image minus the eroded image, an effective way to extract edge contours
cv::morphologyEx(test, result, MORPH_GRADIENT, kernel);
Image sharpening compensates the contours of an image, strengthening its edges and gray-level transitions so that it looks crisper; methods fall into spatial-domain and frequency-domain processing. The goal is to emphasize the edges and outlines of objects, or certain linear features, in the image. Because this kind of filtering raises the contrast between object edges and the surrounding pixels, it is also called edge enhancement.
cv::Mat sharp = (cv::Mat_<int>(3, 3) << 1, 1, 1, 1, -8, 1, 1, 1, 1);
cv::Mat result;
filter2D(morph_open_mat, result, -1, sharp, cv::Point(-1, -1), 0, cv::BORDER_DEFAULT);
convertScaleAbs(result, result);
// Build a rectangular mask from four corner points and copy through it
Mat roi = Mat::zeros(Size(scale_width, scale_height), CV_8UC1);
std::vector<std::vector<Point>> contour;
std::vector<Point> pts;
pts.emplace_back(Point(min_x, min_y));
pts.emplace_back(Point(min_x, max_y));
pts.emplace_back(Point(max_x, max_y));
pts.emplace_back(Point(max_x, min_y));
contour.push_back(pts);
drawContours(roi, contour, 0, Scalar::all(255), -1); // thickness -1 fills the polygon
rgbImg1.copyTo(dstImg1, roi); // copy only where the mask is non-zero
double arcLength( InputArray curve, bool closed );
double contourArea( InputArray contour, bool oriented = false );
void findContours( InputOutputArray image, OutputArrayOfArrays contours,
OutputArray hierarchy, int mode,
int method, Point offset=Point());
vector<vector<cv::Point>> contours;
cv::findContours(sub_img, contours, cv::RETR_CCOMP, cv::CHAIN_APPROX_SIMPLE);
for (int i = 0; i < contours.size(); i++) {
if(cv::contourArea(contours[i]) < 100) continue;
cv::Scalar color(255, 255, 255);
drawContours(sub_img, contours, i, color, 2);
}
cv::Mat labels, stats, centroids;
int num = connectedComponentsWithStats(sub_img, labels, stats, centroids);
vector<cv::Vec3b> color(num);
color[0] = cv::Vec3b(0, 0, 0); // background color
for (int m = 1; m < num; m++) {
color[m] = cv::Vec3b(rand() % 256, rand() % 256, rand() % 256);
// stats has one row per label, including the background row 0
if (stats.at<int>(m, cv::CC_STAT_AREA) < 100)
color[m] = cv::Vec3b(0, 0, 0);
}
cv::Mat src_color = cv::Mat::zeros(sub_img.size(), CV_8UC3);
for (int x = 0; x < sub_img.rows; x++)
for (int y = 0; y < sub_img.cols; y++)
{
int label = labels.at<int>(x, y); // note labels holds int, not uchar
src_color.at<cv::Vec3b>(x, y) = color[label];
}
imshow("labelMap", src_color);
imshow("labelMap", src_color);
blur(sub_img, sub_img, cv::Size(3, 3));
medianBlur(sub_img, sub_img, 3);
// Grayscale image
int pv = dstImg1.at<uchar>(int(keypoints_img1[matches[i].queryIdx].pt.y), int(keypoints_img1[matches[i].queryIdx].pt.x));
// Color (BGR) image
Vec3b bgr = dstImg1.at<Vec3b>(int(keypoints_img1[matches[i].queryIdx].pt.y), int(keypoints_img1[matches[i].queryIdx].pt.x));
Base image:
To add a watermark to the top-left corner, first carve out an ROI there with the same size as the logo, then copy the logo into that ROI.
#include <opencv2/opencv.hpp>
#include <iostream>
#include <sstream>
using namespace std;
int main() {
cv::Mat logo = cv::imread("../logo.png");
stringstream imgname;
imgname << "../000001.jpg";
cv::Mat ori_img = cv::imread(imgname.str());
cv::Mat imgROI = ori_img(cv::Rect(20, 20, logo.cols, logo.rows));
logo.copyTo(imgROI);
return 0;
}
void bitwise_and(InputArray src1, InputArray src2,OutputArray dst, InputArray mask=noArray());//dst = src1 & src2
void bitwise_or(InputArray src1, InputArray src2,OutputArray dst, InputArray mask=noArray());//dst = src1 | src2
void bitwise_not(InputArray src, OutputArray dst,InputArray mask=noArray());//dst = ~src
void bitwise_xor(InputArray src1, InputArray src2,OutputArray dst, InputArray mask=noArray());//dst = src1 ^ src2
To cut the four characters out as a watermark, convert the logo to grayscale and then apply thresholding.
#include <opencv2/opencv.hpp>
#include <iostream>
#include <sstream>
using namespace std;
int main() {
cv::Mat logo = cv::imread("../logo.png");
stringstream imgname;
imgname << "../000001.jpg";
cv::Mat ori_img = cv::imread(imgname.str());
// First define a mask (black areas are ignored; only white areas take effect) by converting the logo to grayscale into Mask
cv::Mat mask;
cv::cvtColor(logo, mask, cv::COLOR_BGR2GRAY);
// Invert the mask
bitwise_not(mask, mask);
// Threshold the inverted mask
cv::threshold(mask, mask, 10, 255, cv::THRESH_BINARY);
cv::Mat imgROI = ori_img(cv::Rect(20, 20, logo.cols, logo.rows));
// Copy the logo onto imgROI; non-zero mask pixels take effect, zero pixels are skipped
logo.copyTo(imgROI, mask);
cv::imwrite("../img.jpg", ori_img);
cv::waitKey(0);
return 0;
}
cv::HoughLinesP(
InputArray src, // input image (8-bit, single-channel)
OutputArray lines, // output line segments, each as two endpoint coordinates (vector<Vec4i>)
double rho, // distance resolution of the accumulator, in pixels
double theta, // angle resolution of the accumulator (typically CV_PI/180)
int threshold, // accumulator threshold: only candidates with enough votes are returned as lines
double minLineLength = 0, // minimum line length
double maxLineGap = 0 // maximum allowed gap between points on the same line
)
int main()
{
Mat img = imread("D:\\pdf2jpg\\nn\\00010.jpg");
//GetRedComponet(img);
Mat contours,res;
Canny(img,contours,125,350);
cvtColor( contours, res, COLOR_GRAY2BGR );
vector<Vec4i> lines;
HoughLinesP(contours, lines, 1, CV_PI/180, 80, 30, 10 );
for( size_t i = 0; i < lines.size(); i++ )
{
Vec4i l = lines[i];
line(res, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(0,0,255), 2);
}
imwrite("D:\\dst1.jpg",res);
}
bool cv::solve(
cv::InputArray X, // left-hand matrix X, n x n
cv::InputArray Y, // right-hand matrix Y, n x 1
cv::OutputArray A, // result: coefficient matrix A, n x 1
int method = cv::DECOMP_LU // solution method
);
void PolyFit(std::vector<cv::Point> &points, const int order,
cv::Mat &coeff) {
const int n = points.size();
cv::Mat A = cv::Mat::zeros(order + 1, order + 1, CV_64FC1);
cv::Mat B = cv::Mat::zeros(order + 1, 1, CV_64FC1);
// Build the A matrix (normal equations); note this code fits x as a polynomial of y, the commented lines show the y = f(x) variant
for (int i = 0; i < order + 1; ++i) {
for (int j = 0; j < order + 1; ++j) {
for (int k = 0; k < n; ++k) {
// A.at(i, j) += std::pow(points.at(k).x, i + j);
A.at<double>(i, j) += std::pow(points.at(k).y, i + j);
}
}
}
// Build the B matrix
for (int i = 0; i < order + 1; ++i) {
for (int k = 0; k < n; ++k) {
// B.at(i, 0) += std::pow(points.at(k).x, i) * points.at(k).y;
B.at<double>(i, 0) += std::pow(points.at(k).y, i) * points.at(k).x;
}
}
coeff = cv::Mat::zeros(order + 1, 1, CV_64FC1);
clock_t start = clock();
// Solve the linear system A * coeff = B
if (!cv::solve(A, B, coeff, cv::DECOMP_LU)) {
std::cout << "Failed to solve !" << std::endl;
}
clock_t end = clock();
cout << "solve consume: " << (double)(end - start) / CLOCKS_PER_SEC * 1000 << endl;
cout << "coeff: " << coeff << endl;
}
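A usage sketch (data illustrative). As written, PolyFit fits x as a polynomial of y, so coeff holds [a0, a1, ..., a_order] with x = a0 + a1*y + a2*y^2 + ...:
std::vector<cv::Point> pts = { {1, 0}, {2, 1}, {5, 2}, {10, 3} }; // points on x = 1 + y*y
cv::Mat coeff;
PolyFit(pts, 2, coeff); // expect coeff close to [1, 0, 1]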
void approxPolyDP(InputArray curve, OutputArray approxCurve, double epsilon, bool closed);
approxPolyDP(contourMat, approxCurve, 10, true);
void calcOpticalFlowPyrLK( InputArray prevImg, InputArray nextImg,
InputArray prevPts, InputOutputArray nextPts,
OutputArray status, OutputArray err,
Size winSize = Size(21,21), int maxLevel = 3,
TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 0.01),
int flags = 0, double minEigThreshold = 1e-4 );
// lucas_kanade.cpp
#include <iostream>
#include <string>
#include <vector>
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/video.hpp>
using namespace cv;
using namespace std;
int lucas_kanade(const string& filename, bool save)
{
VideoCapture capture(filename);
if (!capture.isOpened()){
// error opening the video input
cerr << "Unable to open file!" << endl;
return 0;
}
// Create some random colors
vector<Scalar> colors;
RNG rng;
for(int i = 0; i < 100; i++)
{
int r = rng.uniform(0, 256);
int g = rng.uniform(0, 256);
int b = rng.uniform(0, 256);
colors.push_back(Scalar(r,g,b));
}
Mat old_frame, old_gray;
vector<Point2f> p0, p1;
// Take the first frame and find corners in it
capture >> old_frame;
cvtColor(old_frame, old_gray, COLOR_BGR2GRAY);
goodFeaturesToTrack(old_gray, p0, 100, 0.3, 7, Mat(), 7, false, 0.04);
// Create a mask image for drawing the tracks
Mat mask = Mat::zeros(old_frame.size(), old_frame.type());
int counter = 0;
while(true){
Mat frame, frame_gray;
capture >> frame;
if (frame.empty())
break;
cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
// Compute the optical flow
vector<uchar> status;
vector<float> err;
TermCriteria criteria = TermCriteria((TermCriteria::COUNT) + (TermCriteria::EPS), 10, 0.03);
calcOpticalFlowPyrLK(old_gray, frame_gray, p0, p1, status, err, Size(15,15), 2, criteria);
vector<Point2f> good_new;
for(uint i = 0; i < p0.size(); i++)
{
// Select the good points
if(status[i] == 1) {
good_new.push_back(p1[i]);
// Draw the track
line(mask,p1[i], p0[i], colors[i], 2);
circle(frame, p1[i], 5, colors[i], -1);
}
}
Mat img;
add(frame, mask, img);
if (save) {
string save_path = "./optical_flow_frames/frame_" + to_string(counter) + ".jpg";
imwrite(save_path, img);
}
imshow("flow", img);
int keyboard = waitKey(25);
if (keyboard == 'q' || keyboard == 27)
break;
// Update the previous frame and previous points
old_gray = frame_gray.clone();
p0 = good_new;
counter++;
}
return 0;
}
void cv::calcOpticalFlowFarneback( InputArray _prev0, InputArray _next0,
OutputArray _flow0, double pyr_scale, int levels, int winsize,
int iterations, int poly_n, double poly_sigma, int flags )
#include "stdafx.h"
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
using namespace cv;
using namespace std;
static void drawOptFlowMap(const Mat& flow, Mat& cflowmap, int step,
double, const Scalar& color)
{
for(int y = 0; y < cflowmap.rows; y += step)
for(int x = 0; x < cflowmap.cols; x += step)
{
const Point2f& fxy = flow.at<Point2f>(y, x);
line(cflowmap, Point(x,y), Point(cvRound(x+fxy.x), cvRound(y+fxy.y)),
color);
circle(cflowmap, Point(x,y), 2, color, -1);
}
}
int main(int, char**)
{
VideoCapture cap(0);
if( !cap.isOpened() )
return -1;
Mat prevgray, gray, flow, cflow, frame;
namedWindow("flow", 1);
for(;;)
{
cap >> frame;
cvtColor(frame, gray, COLOR_BGR2GRAY);
if( prevgray.data )
{
calcOpticalFlowFarneback(prevgray, gray, flow, 0.5, 3, 15, 3, 5, 1.2, 0);
cvtColor(prevgray, cflow, COLOR_GRAY2BGR);
drawOptFlowMap(flow, cflow, 16, 1.5, Scalar(0, 255, 0));
imshow("flow", cflow);
}
if(waitKey(30)>=0)
break;
std::swap(prevgray, gray);
}
return 0;
}
int floodFill( InputOutputArray image,
Point seedPoint, Scalar newVal, CV_OUT Rect* rect = 0,
Scalar loDiff = Scalar(), Scalar upDiff = Scalar(),
int flags = 4 );
int floodFill( InputOutputArray image, InputOutputArray mask,
Point seedPoint, Scalar newVal, CV_OUT Rect* rect=0,
Scalar loDiff = Scalar(), Scalar upDiff = Scalar(),
int flags = 4 );
Flood fill selects the region connected to a seed point whose values are close to it and recolors that region with a specified value, which can be used to mark or separate parts of an image for further analysis and processing.
With loDiff = Scalar(1, 1, 1) and upDiff = Scalar(10, 10, 10), a candidate pixel X is filled only if, compared with an already-filled neighbor Y, it satisfies X - Y < 10 and Y - X < 1.
With loDiff = Scalar(10, 10, 10) and upDiff = Scalar(1, 1, 1), the condition becomes X - Y < 1 and Y - X < 10.
Mat src = imread("test.jpg");
Rect roi;
int flags = 8;
Mat mask = Mat::zeros(src.rows + 2,src.cols + 2, CV_8UC1); // the mask must be 2 pixels larger than src
mask.at<uchar>(src.rows / 2, src.cols / 2) = 255; // non-zero mask pixels block the fill
floodFill(src, mask, Point(src.cols / 2, src.rows / 2), Scalar(255, 0, 255), &roi, Scalar(10, 10, 10), Scalar(1, 1, 1), flags);
void add(InputArray src1,
InputArray src2,
OutputArray dst,
InputArray mask = noArray(),
int dtype = -1
);
void subtract(InputArray src1,
InputArray src2,
OutputArray dst,
InputArray mask = noArray(),
int dtype = -1
);
// Per-element multiplication of two arrays: each output element is the product of the corresponding inputs
void multiply(InputArray src1,
InputArray src2,
OutputArray dst,
double scale = 1,
int dtype = -1
);
// Per-element division of two arrays, or of a scalar by an array.
// If one operand is a float scalar and the other a Mat, the scalar is divided element-wise against the Mat;
// if both are Mats, corresponding elements are divided.
// Wherever the divisor is 0, the output element is set to 0.
// dst = src1 / src2 * scale
void divide(InputArray src1,
InputArray src2,
OutputArray dst,
double scale = 1,
int dtype = -1
);
// scale / src2
void divide(double scale,
InputArray src2,
OutputArray dst,
int dtype = -1
);
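A short demo of both overloads (values illustrative):
cv::Mat a = (cv::Mat_<float>(1, 3) << 2, 4, 8);
cv::Mat b = (cv::Mat_<float>(1, 3) << 1, 0, 2), out;
cv::divide(a, b, out); // out = [2, 0, 4]: element-wise, a zero divisor yields 0
cv::divide(16.0, b, out); // out = [16, 0, 8]: scale / src2 per element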
void addWeighted(InputArray src1, double alpha, InputArray src2,
double beta, double gamma, OutputArray dst, int dtype = -1);
void matchTemplate( InputArray image, InputArray templ,OutputArray result,
int method, InputArray mask = noArray());
cv::Mat src = imread("test1.jpg");
cv::Mat sample = imread("t.png");
// Match
cv::Mat result;
matchTemplate(src, sample, result, TM_CCOEFF);
// Normalize
normalize(result, result, 0, 1, NORM_MINMAX, -1, Mat());
// Locate the extrema; for TM_CCOEFF the best match is at the maximum
double minValue; double maxValue; Point minLocation; Point maxLocation;
Point matchLocation;
minMaxLoc(result, &minValue, &maxValue, &minLocation, &maxLocation, Mat());
matchLocation = maxLocation;
// Draw a box around the match
cv::Mat draw = src.clone();
rectangle(draw, matchLocation, Point(matchLocation.x + sample.cols, matchLocation.y + sample.rows), Scalar(255, 0, 0), 2, 8, 0);
cv::Rect boundingRect( InputArray array );
Input: array of type InputArray, a grayscale image or a 2D point set.
Output: a Rect holding the rectangle's size and position.
cv::RotatedRect minAreaRect( InputArray points );
Input: points of type InputArray, a 2D point set.
Output: a RotatedRect describing the minimum-area rotated rectangle (its center, size, and rotation angle; the four corner points can be recovered from it). A sketch of both calls follows.
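A sketch of both calls on a point set (the contour values are illustrative, e.g. one entry from findContours):
std::vector<cv::Point> contour = { {10, 10}, {60, 20}, {50, 70}, {15, 55} };
cv::Rect box = cv::boundingRect(contour); // axis-aligned: box.x, box.y, box.width, box.height
cv::RotatedRect rbox = cv::minAreaRect(contour); // center, size and rotation angle
cv::Point2f corners[4];
rbox.points(corners); // recover the four corner points when needed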
A function for closing small-area holes by removing small connected regions:
void Clear_MicroConnected_Areas(cv::Mat src, cv::Mat &dst, double min_area)
{
// Work on a copy
dst = src.clone();
std::vector<std::vector<cv::Point> > contours; // contour container
std::vector<cv::Vec4i> hierarchy;
// Find contours
// The fourth parameter, cv::RETR_TREE, retrieves all contours and rebuilds the full hierarchy
// The fifth parameter, cv::CHAIN_APPROX_NONE, stores all consecutive boundary points into contours
cv::findContours(src, contours, hierarchy, cv::RETR_TREE, cv::CHAIN_APPROX_NONE, cv::Point());
if (!contours.empty() && !hierarchy.empty())
{
std::vector<std::vector<cv::Point> >::const_iterator itc = contours.begin();
// Iterate over all contours
while (itc != contours.end())
{
// Locate the current contour's bounding box
cv::Rect rect = cv::boundingRect(cv::Mat(*itc));
// contourArea computes the area of the connected region
double area = contourArea(*itc);
// If the area is below the given threshold
if (area < min_area)
{
// iterate over every pixel inside the bounding box
for (int i = rect.y; i < rect.y + rect.height; i++)
{
uchar *output_data = dst.ptr<uchar>(i);
for (int j = rect.x; j < rect.x + rect.width; j++)
{
// zero out the pixels of this region
if (output_data[j] == 255)
{
output_data[j] = 0;
}
}
}
}
itc++;
}
}
}
// Invert so the holes become foreground, remove the small ones, then invert back
hole = 255 - mask;
Clear_MicroConnected_Areas(hole, hole, row * col / 300);
mask = 255 - hole;
// Also remove small isolated foreground specks
Clear_MicroConnected_Areas(mask, mask, row * col / 300);
The point of white balance is to compensate for the color cast that appears when shooting under a particular light source, by boosting the corresponding complementary color so that white objects are rendered as white.
The perfect reflector algorithm is one of the more common white-balance algorithms and generally outperforms the gray-world algorithm. It assumes that the brightest white point in the image is a mirror-like surface that reflects the illumination perfectly; based on that white point, the three channel values are adjusted appropriately to achieve white balance. In addition, the per-channel means over the brightest interval are computed; the gap between a channel's mean and its maximum determines how strongly that channel is adjusted.
The procedure is as follows:
1. Compute the per-channel gray-level maxima Rmax, Gmax, Bmax over the image.
2. Use the sum of the three channel values to determine the lower bound T of the brightest interval.
3. Compute the per-channel means Rm, Gm, Bm over the pixels whose channel sum exceeds T.
4. Compute each channel's compensation factor as the channel maximum divided by that channel's bright-region mean.
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace std;
// White balance: perfect reflector
cv::Mat WhiteBalcane_PRA(cv::Mat src)
{
cv::Mat result = src.clone();
if (src.channels() != 3)
{
cout << "The number of image channels is not 3." << endl;
return result;
}
// Split the channels
vector<cv::Mat> Channel;
cv::split(src, Channel);
// Parameters
int row = src.rows;
int col = src.cols;
int RGBSum[766] = { 0 }; // histogram of per-pixel channel sums (0..765)
uchar maxR = 0, maxG = 0, maxB = 0;
// Find the per-channel maxima
for (int i = 0; i < row; ++i)
{
uchar *b = Channel[0].ptr<uchar>(i);
uchar *g = Channel[1].ptr<uchar>(i);
uchar *r = Channel[2].ptr<uchar>(i);
for (int j = 0; j < col; ++j)
{
int sum = b[j] + g[j] + r[j];
RGBSum[sum]++;
maxB = max(maxB, b[j]);
maxG = max(maxG, g[j]);
maxR = max(maxR, r[j]);
}
}
// Determine the lower bound T of the brightest interval (the top 10% of pixels)
int T = 0;
int num = 0;
int K = static_cast<int>(row * col * 0.1);
for (int i = 765; i >= 0; --i)
{
num += RGBSum[i];
if (num > K)
{
T = i;
break;
}
}
// Compute the per-channel means over the bright region
double Bm = 0.0, Gm = 0.0, Rm = 0.0;
int count = 0;
for (int i = 0; i < row; ++i)
{
uchar *b = Channel[0].ptr<uchar>(i);
uchar *g = Channel[1].ptr<uchar>(i);
uchar *r = Channel[2].ptr<uchar>(i);
for (int j = 0; j < col; ++j)
{
int sum = b[j] + g[j] + r[j];
if (sum > T)
{
Bm += b[j];
Gm += g[j];
Rm += r[j];
count++;
}
}
}
Bm /= count;
Gm /= count;
Rm /= count;
// Adjust each channel
Channel[0] *= maxB / Bm;
Channel[1] *= maxG / Gm;
Channel[2] *= maxR / Rm;
// Merge the channels
cv::merge(Channel, result);
return result;
}
int main()
{
// Load the source image
cv::Mat src = cv::imread("test21.jpg");
// White balance: perfect reflector
cv::Mat result = WhiteBalcane_PRA(src);
// Display
cv::imshow("src", src);
cv::imshow("result", result);
cv::waitKey(0);
return 0;
}
void buildPyramid( InputArray src, OutputArrayOfArrays dst,
int maxlevel, int borderType = BORDER_DEFAULT );
cv::Mat src = imread("test.jpg");
vector<cv::Mat> ths;
buildPyramid(src, ths, 3, cv::BORDER_DEFAULT); // ths receives maxlevel + 1 images: ths[0] is src itself
imshow("original", src);
imshow("level 0", ths[0]);
imshow("level 1", ths[1]);
imshow("level 2", ths[2]);
imshow("level 3", ths[3]);
// Upsample
void pyrUp( InputArray src, OutputArray dst,
const Size& dstsize = Size(), int borderType = BORDER_DEFAULT );
// Downsample
void pyrDown( InputArray src, OutputArray dst,
const Size& dstsize = Size(), int borderType = BORDER_DEFAULT );
// Downsample: Gaussian smoothing, then halve each dimension
pyrDown(src, th1, Size(0, 0), BORDER_DEFAULT);
// Upsample: double each dimension, then Gaussian smoothing
pyrUp(th1, th2, Size(0, 0), BORDER_DEFAULT);
void adaptiveThreshold( InputArray src, OutputArray dst,
double maxValue, int adaptiveMethod,
int thresholdType, int blockSize, double C );
adaptiveThreshold(src, th1, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY, 7, 5);
// Saturation
cv::Mat Saturation(cv::Mat src, int percent);
// Lightness
cv::Mat Lightness(cv::Mat src, float percent);
// Contrast
cv::Mat Contrast(cv::Mat src, int percent);
// Sharpening
cv::Mat Sharpen(cv::Mat input, int percent, int type);
// Shadow selection
cv::Mat Shadow(cv::Mat input, int light);
// Highlight selection
cv::Mat HighLight(cv::Mat input, int light);
// Color temperature adjustment
cv::Mat ColorTemperature(cv::Mat input, int percent);
cv::Mat src = imread("chocolate.jpg");
cv::Mat sat = Saturation(src, 20);
cv::Mat lig = Lightness(sat, -15);
cv::Mat con = Contrast(lig, 35);
cv::Mat sha = Sharpen(con, 10, 0);
cv::Mat sdo = Shadow(sha, 25);
cv::Mat hig = HighLight(sdo, -5);
cv::Mat ImageSplicing(vector<cv::Mat> images,int type)
{
if (type != 0 && type != 1)
type = 0;
int num = images.size();
int newrow = 0;
int newcol = 0;
cv::Mat result;
// Horizontal splicing
if (type == 0)
{
int minrow = 10000;
for (int i = 0; i < num; ++i)
{
if (minrow > images[i].rows)
minrow = images[i].rows;
}
newrow = minrow;
for (int i = 0; i < num; ++i)
{
int tcol = images[i].cols*minrow / images[i].rows;
int trow = newrow;
cv::resize(images[i], images[i], cv::Size(tcol, trow));
newcol += images[i].cols;
if (images[i].type() != images[0].type())
images[i].convertTo(images[i], images[0].type());
}
result = cv::Mat(newrow, newcol, images[0].type(), cv::Scalar(255, 255, 255));
cv::Range rangerow, rangecol;
int start = 0;
for (int i = 0; i < num; ++i)
{
rangerow = cv::Range((newrow - images[i].rows) / 2, (newrow - images[i].rows) / 2 + images[i].rows);
rangecol = cv::Range(start, start + images[i].cols);
images[i].copyTo(result(rangerow, rangecol));
start += images[i].cols;
}
}
// Vertical splicing
else if (type == 1) {
int mincol = 10000;
for (int i = 0; i < num; ++i)
{
if (mincol > images[i].cols)
mincol = images[i].cols;
}
newcol = mincol;
for (int i = 0; i < num; ++i)
{
int trow = images[i].rows*mincol / images[i].cols;
int tcol = newcol;
cv::resize(images[i], images[i], cv::Size(tcol, trow));
newrow += images[i].rows;
if (images[i].type() != images[0].type())
images[i].convertTo(images[i], images[0].type());
}
result = cv::Mat(newrow, newcol, images[0].type(), cv::Scalar(255, 255, 255));
cv::Range rangerow, rangecol;
int start = 0;
for (int i = 0; i < num; ++i)
{
rangecol= cv::Range((newcol - images[i].cols) / 2, (newcol - images[i].cols) / 2 + images[i].cols);
rangerow = cv::Range(start, start + images[i].rows);
images[i].copyTo(result(rangerow, rangecol));
start += images[i].rows;
}
}
return result;
}
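A possible call (file names illustrative):
std::vector<cv::Mat> imgs = { cv::imread("a.jpg"), cv::imread("b.jpg") };
cv::Mat spliced = ImageSplicing(imgs, 0); // 0 = horizontal, 1 = vertical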
void findNonZero( InputArray src, OutputArray idx );
vector<cv::Point> idx;
cv::findNonZero(test, idx);
cout << "number:" << idx.size() << endl;
for (auto i : idx)
{
cout << "x:" << i.x << " y:" << i.y << endl;
}
void copyMakeBorder(InputArray src, OutputArray dst,
int top, int bottom, int left, int right,
int borderType, const Scalar& value = Scalar() );
cv::copyMakeBorder(src, padded, 0, h - src.rows, 0, w - src.cols, cv::BORDER_CONSTANT, cv::Scalar::all(0));
https://www.caxkernel.com/author/93