在搭建完成opencv 的环境之后,我们开始记录一些简单的图像处理的过程:
以上是合成之后的效果,先看一下效果图。
接下来是具体实现的步骤,下面的代码可以直接拿来使用。
//读取三张图片
QString imgqstrR = QStringLiteral("%1%2%3.bmp").arg(QCoreApplication::applicationDirPath()).arg("\\FrameDirec\\")
.arg(137);
QString imgqstrG = QStringLiteral("%1%2%3.bmp").arg(QCoreApplication::applicationDirPath()).arg("\\FrameDirec\\")
.arg(138);
QString imgqstrB = QStringLiteral("%1%2%3.bmp").arg(QCoreApplication::applicationDirPath()).arg("\\FrameDirec\\")
.arg(139);
//图像处理步骤,色彩还原步骤
//1 提取图像的亮度 r g b 的各个亮度.
double brightness_r = _qimage2MatInteroperate->getAvg(imgqstrR);
double brightness_g = _qimage2MatInteroperate->getAvg(imgqstrG);
double brightness_b = _qimage2MatInteroperate->getAvg(imgqstrB);
//求取亮度平均数值
double ageRgb = (brightness_r + brightness_g + brightness_b) / 3;
cv::Mat dst_r;
cv::Mat dst_g;
cv::Mat dst_b;
//重新设置亮度.
_qimage2MatInteroperate->setAvg(imgqstrR, dst_r, ageRgb);
_qimage2MatInteroperate->setAvg(imgqstrG, dst_g, ageRgb);
_qimage2MatInteroperate->setAvg(imgqstrB, dst_b, ageRgb);
//查看其亮度直方图.
cv::Mat histogram_R, histogram_g, histogram_b;
_qimage2MatInteroperate->DrawHistogramRectangular(dst_r, histogram_R, Scalar(255), true, "hisogramR");
_qimage2MatInteroperate->DrawHistogramRectangular(dst_g, histogram_g, Scalar(255), true, "hisogramG");
_qimage2MatInteroperate->DrawHistogramRectangular(dst_b, histogram_b, Scalar(255), true, "hisogramB");
//测试平均之后的亮度. 测试,RGB三色灰度图经过平均之后,其整体亮度差距不是很大
brightness_r = _qimage2MatInteroperate->getAvg(dst_r);
brightness_g = _qimage2MatInteroperate->getAvg(dst_g);
brightness_b = _qimage2MatInteroperate->getAvg(dst_b);
qDebug() << QString("%1 %2 %3").arg(brightness_r).arg(brightness_g).arg(brightness_b);
//3 将亮度平衡之后的图片,合成一张图片.
cv::Mat outDst, outDst2;
_qimage2MatInteroperate->mergeRGB(dst_r, dst_g, dst_b, outDst, outDst2, true);
//接下来是上面 _qimage2MatInteroperate 这个对象调用的一些封装好的算法,
//可以直接使用
//绘制直方图
//Draw the gray-level histogram of channel 0 of srcMat as a 256x256 bar chart.
//  srcMat    input image (histogram is computed on its first channel)
//  dst       receives the rendered histogram image (CV_8U, black background)
//  color     bar color
//  isShowWnd when true, also shows the source and histogram in HighGUI windows
//  title     window title (converted to the local 8-bit encoding)
//Returns 0 on success, -1 when srcMat holds no data.
int Qimage2MatInteroperate::DrawHistogramRectangular(cv::Mat & srcMat, cv::Mat &dst, Scalar color, bool isShowWnd, QString title)
{
	std::string titlestrPath = std::string((const char*)title.toLocal8Bit());
	cv::Mat srcImage = srcMat.clone();
	if (!srcImage.data)
	{
		std::cout << "fail to load image" << endl;
		return -1; //distinguish failure from the normal 0 return
	}
	//Histogram parameters: 1 dimension, 256 bins covering [0, 256), channel 0.
	Mat dstHist;
	int dims = 1;
	float hranges[] = { 0, 256 };
	const float *ranges[] = { hranges }; //calcHist requires const float**
	int size = 256;
	int channels = 0;
	calcHist(&srcImage, 1, &channels, Mat(), dstHist, dims, &size, ranges);
	Mat dstImage(size, size, CV_8U, Scalar(0));
	//Largest bin count, used to scale every bar into the image height.
	double minValue = 0;
	double maxValue = 0;
	minMaxLoc(dstHist, &minValue, &maxValue, 0, 0); //C API equivalent: cvGetMinMaxHistValue
	//saturate_cast clamps: negative results become 0, results above 255 become 255.
	int hpt = saturate_cast<int>(0.9 * size); //tallest bar uses 90% of the height
	for (int i = 0; i < 256; i++)
	{
		float binValue = dstHist.at<float>(i);
		//Stretch the bin count into [0, hpt].
		int realValue = saturate_cast<int>(binValue * hpt / maxValue);
		line(dstImage, Point(i, size - 1), Point(i, size - realValue), color);
	}
	if (isShowWnd == true)
	{
		imshow("原图" + titlestrPath, srcImage);
		imshow(titlestrPath, dstImage);
	}
	dst = dstImage.clone();
	return 0;
}
///Load the image at path `scr` and linearly rescale its pixels so the mean
///gray level becomes `avg`; the result is written to `dst`.
///On load failure `dst` is left untouched.
void Qimage2MatInteroperate::setAvg(QString scr, Mat &dst, double avg)
{
	scr.replace("/", "\\"); //imread here expects Windows-style separators
	std::string path = std::string((const char*)scr.toLocal8Bit());
	const char* strpath = path.data();
	cv::Mat source = imread(strpath);
	if (!source.data)
	{
		return; //file missing or unreadable
	}
	double fpreAvg = getAvg(source);
	if (fpreAvg <= 0.0)
	{
		//All-black image: a multiplicative gain cannot reach the target
		//brightness, and dividing by zero would be undefined. Return a copy.
		source.copyTo(dst);
		return;
	}
	//Gain that maps the current mean brightness onto the requested one.
	source.convertTo(dst, source.type(), avg / fpreAvg);
}
///Return the mean gray level of `img`. A 3-channel image is converted to
///grayscale first; a single-channel image is averaged directly (the original
///cvtColor call would throw on 1-channel input).
double Qimage2MatInteroperate::getAvg(Mat img)
{
	Mat gray;
	if (img.channels() == 3)
	{
		//NOTE(review): CV_RGB2GRAY assumes RGB channel order, but imread()
		//produces BGR, so the R/B luma weights are swapped here — confirm
		//whether this is intentional before changing it.
		cvtColor(img, gray, CV_RGB2GRAY);
	}
	else
	{
		gray = img; //already gray: shallow header copy, no conversion needed
	}
	Scalar scalar = mean(gray);
	return scalar.val[0];
}
//这个算法,就是将原图中不均匀的亮度,平均地分散到整幅图上(参考自他人的实现)
///Uneven-illumination compensation: estimate the local mean brightness of each
///blockSize x blockSize tile, then subtract each pixel's local deviation from
///the global mean so the lighting becomes uniform across the whole image.
///`image` is converted to grayscale (when 3-channel) and overwritten in place.
void Qimage2MatInteroperate::unevenLightCompensate(Mat & image, int blockSize)
{
	//Work on a single gray channel.
	if (image.channels() == 3) cvtColor(image, image, COLOR_RGB2GRAY);
	//Global mean brightness of the whole image.
	double average = mean(image)[0];
	//Tile counts per direction; the last tile may be smaller than blockSize.
	int rows_new = ceil(double(image.rows) / double(blockSize));
	int cols_new = ceil(double(image.cols) / double(blockSize));
	//One float per tile: that tile's mean brightness.
	Mat blockImage;
	blockImage = Mat::zeros(rows_new, cols_new, CV_32FC1);
	for (int i = 0; i < rows_new; i++)
	{
		for (int j = 0; j < cols_new; j++)
		{
			int rowmin = i * blockSize;
			int rowmax = (i + 1)*blockSize;
			//Clamp to the image height.
			if (rowmax > image.rows)
			{
				rowmax = image.rows;
			}
			int colmin = j * blockSize;
			int colmax = (j + 1)*blockSize;
			//Clamp to the image width.
			if (colmax > image.cols)
			{
				colmax = image.cols;
			}
			Mat imageROI = image(Range(rowmin, rowmax), Range(colmin, colmax));
			double temaver = mean(imageROI)[0];
			blockImage.at<float>(i, j) = (float)temaver; //store the tile's mean
		}
	}
	//Per-tile deviation from the global mean brightness.
	blockImage = blockImage - average;
	//Upscale the deviation map back to full resolution with bicubic
	//interpolation (INTER_CUBIC); fx/fy are 0 because dsize is given.
	Mat blockImage2;
	resize(blockImage, blockImage2, image.size(), 0, 0, INTER_CUBIC);
	Mat image2;
	image.convertTo(image2, CV_32FC1);
	//Subtract the local deviation: over-lit regions are darkened and
	//under-lit regions brightened, flattening shading/background gradients.
	Mat dst = image2 - blockImage2;
	//Back to 8-bit; convertTo saturates out-of-range values.
	dst.convertTo(image, CV_8UC1);
}
///Merge three gray images into one 3-channel image.
///  imgqR/imgqG/imgqB  the gray source channels
///  outMergeDst        receives the plain merge of the raw channels
///  outMergeBalance    receives the light-compensated / color-balanced merge
///                     (only filled with processed data when IsWhite is true)
///  IsWhite            enables color balance + median filter + sharpening
///Also shows intermediate windows and writes balance1.bmp / balance2.bmp.
void Qimage2MatInteroperate::mergeRGB(cv::Mat imgqR, cv::Mat imgqG, cv::Mat imgqB, cv::Mat &outMergeDst, cv::Mat &outMergeBalance, bool IsWhite)
{
	//Round-trip through disk so the legacy C API can reload them as 8-bit gray.
	const char* str_R = ".\\imgqr.bmp";
	const char* str_G = ".\\imgqg.bmp";
	const char* str_B = ".\\imgqb.bmp";
	imwrite(str_R, imgqR);
	imwrite(str_G, imgqG);
	imwrite(str_B, imgqB);
	IplImage *srcImgR = cvLoadImage(str_R, 0);//0 = force grayscale load
	IplImage *srcImgG = cvLoadImage(str_G, 0);
	IplImage *srcImgB = cvLoadImage(str_B, 0);
	IplImage *dstImg = cvCreateImage(cvGetSize(srcImgR), 8, 3);
	IplImage *dstImg_Balance = cvCreateImage(cvGetSize(srcImgR), 8, 3);
	cvNamedWindow("src_R", CV_WINDOW_NORMAL);
	cvShowImage("src_R", srcImgR);
	cvNamedWindow("src_G", CV_WINDOW_NORMAL);
	cvShowImage("src_G", srcImgG);
	cvNamedWindow("src_B", CV_WINDOW_NORMAL);
	cvShowImage("src_B", srcImgB);
	//Plain merge of the raw channels — the "before" result.
	cvMerge(srcImgR, srcImgG, srcImgB, NULL, dstImg);
	//Uneven-light compensation, applied per channel.
	cv::Mat mat_r, mat_g, mat_b, mat_dstImg_Balance;
	mat_r = cv::cvarrToMat(srcImgR);
	mat_g = cv::cvarrToMat(srcImgG);
	mat_b = cv::cvarrToMat(srcImgB);
	int blocksize = 100; //empirically, 75-100 gives good results
	unevenLightCompensate(mat_r, 90);        //R uses a smaller tile (tuned by eye)
	unevenLightCompensate(mat_g, blocksize);
	unevenLightCompensate(mat_b, blocksize);
	//Keep the IplImage headers in named locals: the original took the address
	//of a temporary (&IplImage(mat_r)), which dangles after the statement.
	IplImage iplR = IplImage(mat_r);
	IplImage iplG = IplImage(mat_g);
	IplImage iplB = IplImage(mat_b);
	if (IsWhite == true)
	{
		color_balance(&iplR, Banlece_RGB::R, 0.00366);//balance the R channel
		color_balance(&iplG, Banlece_RGB::G, 0.00334);//balance the G channel
		color_balance(&iplB, Banlece_RGB::B, 0.003);  //balance the B channel
		cvMerge(&iplR, &iplG, &iplB, NULL, dstImg_Balance);//the processed merge
		mat_dstImg_Balance = cv::cvarrToMat(dstImg_Balance);
		//Median filter suppresses salt-and-pepper noise before sharpening.
		medianBlur(mat_dstImg_Balance, mat_dstImg_Balance, 3);
		//3x3 sharpening kernel (Laplacian-style) to raise local contrast.
		cv::Mat kernel = (Mat_<float>(3, 3) << 0, -1, 0, -1, 5, -1, 0, -1, 0);
		filter2D(mat_dstImg_Balance, mat_dstImg_Balance, -1, kernel, Point(-1, -1));
	}
	cvNamedWindow("dst", CV_WINDOW_NORMAL);
	cvShowImage("dst", dstImg);
	cv::Mat temp = cv::cvarrToMat(dstImg);
	cv::imwrite("balance1.bmp", temp);
	cvNamedWindow("dstImg_Balance", CV_WINDOW_NORMAL);
	cvShowImage("dstImg_Balance", dstImg_Balance);
	cv::Mat temp2 = cv::cvarrToMat(dstImg_Balance);
	cv::imwrite("balance2.bmp", temp2);
	//Fill the output parameters — the original never assigned them, so the
	//caller always received empty Mats.
	outMergeDst = temp.clone();
	//Prefer the filtered Mat when available; NOTE(review): whether the
	//in-place medianBlur/filter2D still alias dstImg_Balance's buffer depends
	//on the OpenCV version — verify against the build in use.
	outMergeBalance = mat_dstImg_Balance.empty() ? temp2.clone() : mat_dstImg_Balance.clone();
	//Release the legacy C-API images to stop the per-call memory leak.
	cvReleaseImage(&srcImgR);
	cvReleaseImage(&srcImgG);
	cvReleaseImage(&srcImgB);
	cvReleaseImage(&dstImg);
	cvReleaseImage(&dstImg_Balance);
}