All elements of the filter kernel are 1/(k×k) and the kernel size is k×k; the image borders are left unprocessed.
Code:
#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat srcImage = imread("1.jpg", 0);
    Mat dstImage_Average;
    dstImage_Average.create(srcImage.size(), srcImage.type());
    int k_Average = 3;
    for (int row = k_Average / 2; row < srcImage.rows - k_Average / 2; row++)
        for (int col = k_Average / 2; col < srcImage.cols - k_Average / 2; col++)
        {
            int pixel_sum = 0;
            // Sum all pixels inside the k x k neighborhood.
            for (int a = -k_Average / 2; a <= k_Average / 2; a++)
                for (int b = -k_Average / 2; b <= k_Average / 2; b++)
                    pixel_sum = pixel_sum + srcImage.at<uchar>(row + a, col + b);
            // The output pixel is the neighborhood average.
            dstImage_Average.at<uchar>(row, col) = pixel_sum / (k_Average * k_Average);
        }
    imshow("Mean filter", dstImage_Average);
    waitKey(0);
    return 0;
}
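For comparison, OpenCV's built-in blur performs the same box filtering in one call and also handles the border pixels. A minimal sketch, assuming the same test image 1.jpg:

#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat src = imread("1.jpg", 0);   // same test image as above (assumed)
    Mat dst;
    blur(src, dst, Size(3, 3));     // 3 x 3 box filter, borders handled by OpenCV
    imshow("blur (built-in)", dst);
    waitKey(0);
    return 0;
}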
The kernel is a k×k sample of the two-dimensional normal (Gaussian) distribution; the image borders are left unprocessed.
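Spelled out, the weight that the code below assigns to kernel position (a, b), with the kernel center at (k/2, k/2), is

G(a, b) = exp( -((a - k/2)^2 + (b - k/2)^2) / (2*sigma^2) ) / (2*pi*sigma^2)

(the kernel is not explicitly renormalized to sum to 1).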
Code:
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;

double PI = 3.1415926536;

Mat mask_Gaussian(int k, float sigma); // k x k Gaussian kernel

Mat mask_Gaussian(int k, float sigma)
{
    Mat mask(k, k, CV_32F, Scalar::all(0));
    // Sample the 2D Gaussian at every kernel position, centered at (k/2, k/2).
    for (int a = 0; a < k; ++a)
        for (int b = 0; b < k; ++b)
            mask.at<float>(a, b) = exp(-((a - k / 2)*(a - k / 2) + (b - k / 2)*(b - k / 2)) / (2 * sigma*sigma)) / (2 * PI*sigma*sigma);
    std::cout << mask;
    return mask;
}

int main()
{
    Mat srcImage = imread("1.jpg", 0);
    Mat dstImage_Gaussian;
    dstImage_Gaussian.create(srcImage.size(), CV_8U);
    int k_Gaussian = 11;
    double sigma = (k_Gaussian*0.5 - 1)*0.3 + 0.8;
    Mat mask = mask_Gaussian(k_Gaussian, sigma);
    for (int row = k_Gaussian / 2; row < srcImage.rows - k_Gaussian / 2; row++)
        for (int col = k_Gaussian / 2; col < srcImage.cols - k_Gaussian / 2; col++)
        {
            double pixel_sum = 0.0;
            // Weighted sum of the k x k neighborhood, accumulated in double.
            for (int a = -k_Gaussian / 2; a <= k_Gaussian / 2; a++)
                for (int b = -k_Gaussian / 2; b <= k_Gaussian / 2; b++)
                    pixel_sum = pixel_sum + mask.at<float>(a + k_Gaussian / 2, b + k_Gaussian / 2) * srcImage.at<uchar>(row + a, col + b);
            dstImage_Gaussian.at<uchar>(row, col) = saturate_cast<uchar>(pixel_sum);
        }
    imshow("Gaussian filter", dstImage_Gaussian);
    waitKey(0);
    return 0;
}
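For comparison, OpenCV's built-in GaussianBlur produces a very similar result and also handles the borders. A minimal sketch, assuming the same test image and the same kernel size and sigma:

#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat src = imread("1.jpg", 0);   // same test image as above (assumed)
    Mat dst;
    // Same kernel size and sigma as the hand-written version above.
    GaussianBlur(src, dst, Size(11, 11), (11 * 0.5 - 1) * 0.3 + 0.8);
    imshow("GaussianBlur (built-in)", dst);
    waitKey(0);
    return 0;
}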
The kernel is k×k; the pixel at the center of the mask is replaced by the median of the mask.
Code:
#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat srcImage = imread("1.jpg", 0);
    Mat dstImage_median;
    dstImage_median.create(srcImage.size(), srcImage.type());
    int k_median = 11;
    for (int row = k_median / 2; row < srcImage.rows - k_median / 2; row++)
        for (int col = k_median / 2; col < srcImage.cols - k_median / 2; col++)
        {
            Mat pixel_group(k_median, k_median, CV_8U);     // holds all pixels inside the kernel
            Mat pixel_group_median(1, k_median, CV_8U);     // holds the median of each kernel row
            for (int a = -k_median / 2; a <= k_median / 2; a++)
                for (int b = -k_median / 2; b <= k_median / 2; b++)
                    pixel_group.at<uchar>(a + k_median / 2, b + k_median / 2) = srcImage.at<uchar>(row + a, col + b); // copy the neighborhood into pixel_group
            cv::sort(pixel_group, pixel_group, SORT_EVERY_ROW | SORT_ASCENDING); // sort each row of the neighborhood array in ascending order
            for (int a = 0; a < k_median; a++)
                pixel_group_median.at<uchar>(0, a) = pixel_group.at<uchar>(a, k_median / 2); // the middle element of each sorted row is that row's median; collect them in pixel_group_median
            cv::sort(pixel_group_median, pixel_group_median, SORT_EVERY_ROW | SORT_ASCENDING); // sort the row medians in ascending order
            dstImage_median.at<uchar>(row, col) = pixel_group_median.at<uchar>(0, k_median / 2); // the middle of pixel_group_median is taken as the median of the whole mask (strictly, it is the median of the row medians)
        }
    imshow("Median filter", dstImage_median);
    waitKey(0);
    return 0;
}
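For comparison, OpenCV's built-in medianBlur computes the true median of the k×k neighborhood and handles the borders. A minimal sketch with the same kernel size, assuming the same test image:

#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat src = imread("1.jpg", 0);   // same test image as above (assumed)
    Mat dst;
    medianBlur(src, dst, 11);       // 11 x 11 median filter
    imshow("medianBlur (built-in)", dst);
    waitKey(0);
    return 0;
}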
Two layers:
#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat srcImage = imread("1.jpg", 0);
    Mat dstImage_Pseudocolor_2Layer;
    dstImage_Pseudocolor_2Layer.create(srcImage.size(), CV_8UC3);
    for (int row = 0; row < srcImage.rows; row++)
        for (int col = 0; col < srcImage.cols; col++)
            if (srcImage.at<uchar>(row, col) <= 127)
            {
                // Dark pixels are shown in green (channels are in BGR order).
                dstImage_Pseudocolor_2Layer.at<Vec3b>(row, col)[0] = 0;
                dstImage_Pseudocolor_2Layer.at<Vec3b>(row, col)[1] = 255;
                dstImage_Pseudocolor_2Layer.at<Vec3b>(row, col)[2] = 0;
            }
            else
            {
                // Bright pixels are shown in red.
                dstImage_Pseudocolor_2Layer.at<Vec3b>(row, col)[0] = 0;
                dstImage_Pseudocolor_2Layer.at<Vec3b>(row, col)[1] = 0;
                dstImage_Pseudocolor_2Layer.at<Vec3b>(row, col)[2] = 255;
            }
    imshow("Two-layer pseudocolor", dstImage_Pseudocolor_2Layer);
    waitKey(0);
    return 0;
}
Multiple layers:
#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat srcImage = imread("1.jpg", 0);
    Mat dstImage_Pseudocolor_MultiLayer;
    dstImage_Pseudocolor_MultiLayer.create(srcImage.size(), CV_8UC3);
    for (int row = 0; row < srcImage.rows; row++)
        for (int col = 0; col < srcImage.cols; col++)
            if (srcImage.at<uchar>(row, col) <= 50)
            {
                // [0, 50]: red ramps from 127 up to 255.
                dstImage_Pseudocolor_MultiLayer.at<Vec3b>(row, col)[2] = saturate_cast<uchar>(2.56 * srcImage.at<uchar>(row, col) + 127);
                dstImage_Pseudocolor_MultiLayer.at<Vec3b>(row, col)[0] = 0;
                dstImage_Pseudocolor_MultiLayer.at<Vec3b>(row, col)[1] = 0;
            }
            else if (srcImage.at<uchar>(row, col) > 50 && srcImage.at<uchar>(row, col) <= 100)
            {
                // (50, 100]: full red, green ramps up.
                dstImage_Pseudocolor_MultiLayer.at<Vec3b>(row, col)[2] = 255;
                dstImage_Pseudocolor_MultiLayer.at<Vec3b>(row, col)[0] = 0;
                dstImage_Pseudocolor_MultiLayer.at<Vec3b>(row, col)[1] = saturate_cast<uchar>(2.56 * srcImage.at<uchar>(row, col) - 1);
            }
            else if (srcImage.at<uchar>(row, col) > 100 && srcImage.at<uchar>(row, col) <= 150)
            {
                // (100, 150]: red ramps down, full green, a little blue.
                dstImage_Pseudocolor_MultiLayer.at<Vec3b>(row, col)[2] = saturate_cast<uchar>(-2.56 * srcImage.at<uchar>(row, col) + 384);
                dstImage_Pseudocolor_MultiLayer.at<Vec3b>(row, col)[0] = srcImage.at<uchar>(row, col) - 100;
                dstImage_Pseudocolor_MultiLayer.at<Vec3b>(row, col)[1] = 255;
            }
            else if (srcImage.at<uchar>(row, col) > 150 && srcImage.at<uchar>(row, col) <= 200)
            {
                // (150, 200]: full blue, green ramps down.
                dstImage_Pseudocolor_MultiLayer.at<Vec3b>(row, col)[2] = 0;
                dstImage_Pseudocolor_MultiLayer.at<Vec3b>(row, col)[0] = 255;
                dstImage_Pseudocolor_MultiLayer.at<Vec3b>(row, col)[1] = saturate_cast<uchar>(-srcImage.at<uchar>(row, col) + 255);
            }
            else
            {
                // (200, 255]: blue ramps down.
                dstImage_Pseudocolor_MultiLayer.at<Vec3b>(row, col)[2] = 0;
                dstImage_Pseudocolor_MultiLayer.at<Vec3b>(row, col)[0] = saturate_cast<uchar>(-srcImage.at<uchar>(row, col) + 382);
                dstImage_Pseudocolor_MultiLayer.at<Vec3b>(row, col)[1] = 0;
            }
    imshow("Multi-layer pseudocolor", dstImage_Pseudocolor_MultiLayer);
    waitKey(0);
    return 0;
}
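OpenCV also ships ready-made colormaps through applyColorMap. The result is not identical to the hand-written mapping above, but it gives a comparable multi-layer pseudocolor effect in one call. A minimal sketch, assuming the same test image (the colormap choice is illustrative):

#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat src = imread("1.jpg", 0);           // same test image as above (assumed)
    Mat dst;
    applyColorMap(src, dst, COLORMAP_JET);  // map gray levels to a blue-to-red colormap
    imshow("applyColorMap (built-in)", dst);
    waitKey(0);
    return 0;
}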
Principle: replace each color channel of the original image with one of the other channels.
#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat srcImage_color = imread("1.jpg", 1); // read the test image as a color image
    Mat dstImage_PseudocolorTransformation(srcImage_color.size(), CV_8UC3);
    for (int row = 0; row < srcImage_color.rows; row++)
        for (int col = 0; col < srcImage_color.cols; col++)
        {
            // Rotate the channels: B <- G, G <- R, R <- B.
            dstImage_PseudocolorTransformation.at<Vec3b>(row, col)[0] = srcImage_color.at<Vec3b>(row, col)[1];
            dstImage_PseudocolorTransformation.at<Vec3b>(row, col)[1] = srcImage_color.at<Vec3b>(row, col)[2];
            dstImage_PseudocolorTransformation.at<Vec3b>(row, col)[2] = srcImage_color.at<Vec3b>(row, col)[0];
        }
    imshow("False-color transform", dstImage_PseudocolorTransformation);
    waitKey(0);
    return 0;
}
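The same channel rotation can also be written with split and merge instead of per-pixel loops. A minimal sketch, assuming the same test image read in color:

#include <opencv2/opencv.hpp>
#include <vector>
using namespace cv;

int main()
{
    Mat src = imread("1.jpg", 1);                        // color test image (assumed)
    std::vector<Mat> ch;
    split(src, ch);                                      // ch[0]=B, ch[1]=G, ch[2]=R
    std::vector<Mat> rotated = { ch[1], ch[2], ch[0] };  // B <- G, G <- R, R <- B
    Mat dst;
    merge(rotated, dst);
    imshow("False-color via split/merge", dst);
    waitKey(0);
    return 0;
}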
Principle: transform the pixels using the formula: new pixel = 255 - original pixel.
#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat srcImage_color = imread("1.jpg", 1); // read the test image as a color image
    Mat dstImage_Reverse(srcImage_color.size(), CV_8UC3);
    for (int row = 0; row < srcImage_color.rows; row++)
        for (int col = 0; col < srcImage_color.cols; col++)
        {
            // Invert every channel.
            dstImage_Reverse.at<Vec3b>(row, col)[0] = 255 - srcImage_color.at<Vec3b>(row, col)[0];
            dstImage_Reverse.at<Vec3b>(row, col)[1] = 255 - srcImage_color.at<Vec3b>(row, col)[1];
            dstImage_Reverse.at<Vec3b>(row, col)[2] = 255 - srcImage_color.at<Vec3b>(row, col)[2];
        }
    imshow("Image inversion", dstImage_Reverse);
    waitKey(0);
    return 0;
}
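For 8-bit images the inversion is also available as a single call to bitwise_not, which flips every bit and therefore computes 255 minus each channel value. A minimal sketch, assuming the same test image:

#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat src = imread("1.jpg", 1);   // color test image (assumed)
    Mat dst;
    bitwise_not(src, dst);          // flips every bit, i.e. 255 - pixel for 8-bit data
    imshow("Inversion via bitwise_not", dst);
    waitKey(0);
    return 0;
}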
Principle: apply a logarithmic mapping to the original pixel values.
#include <opencv2/opencv.hpp>
#include <cmath>
using namespace cv;

int main()
{
    Mat srcImage_color = imread("1.jpg", 1); // read the test image as a color image
    Mat dstImage_LogarithmicTransformation(srcImage_color.size(), CV_8UC3);
    for (int row = 0; row < srcImage_color.rows; row++)
        for (int col = 0; col < srcImage_color.cols; col++)
        {
            // s = c * log(1 + r), with c = 45 so that the brightest input maps close to 255.
            dstImage_LogarithmicTransformation.at<Vec3b>(row, col)[0] = saturate_cast<uchar>(45 * log(1 + srcImage_color.at<Vec3b>(row, col)[0]));
            dstImage_LogarithmicTransformation.at<Vec3b>(row, col)[1] = saturate_cast<uchar>(45 * log(1 + srcImage_color.at<Vec3b>(row, col)[1]));
            dstImage_LogarithmicTransformation.at<Vec3b>(row, col)[2] = saturate_cast<uchar>(45 * log(1 + srcImage_color.at<Vec3b>(row, col)[2]));
        }
    imshow("Logarithmic transform", dstImage_LogarithmicTransformation);
    waitKey(0);
    return 0;
}
Principle: apply a power-law (gamma) mapping to the original pixel values. Note that the original pixels must first be converted to the range 0 to 1, so that the gamma transform can make use of the behavior of the power function on the interval from 0 to 1.
#include <opencv2/opencv.hpp>
#include <cmath>
using namespace cv;

int main()
{
    Mat srcImage_color = imread("1.jpg", 1); // read the test image as a color image
    Mat dstImage_GAMMA(srcImage_color.size(), CV_8UC3);
    double gamma = 4;
    for (int row = 0; row < srcImage_color.rows; row++)
        for (int col = 0; col < srcImage_color.cols; col++)
        {
            // Normalize each channel to [0, 1], apply the power function, then scale back to [0, 255].
            dstImage_GAMMA.at<Vec3b>(row, col)[0] = saturate_cast<uchar>(pow(srcImage_color.at<Vec3b>(row, col)[0] / 255.0, gamma) * 255);
            dstImage_GAMMA.at<Vec3b>(row, col)[1] = saturate_cast<uchar>(pow(srcImage_color.at<Vec3b>(row, col)[1] / 255.0, gamma) * 255);
            dstImage_GAMMA.at<Vec3b>(row, col)[2] = saturate_cast<uchar>(pow(srcImage_color.at<Vec3b>(row, col)[2] / 255.0, gamma) * 255);
        }
    imshow("Gamma transform", dstImage_GAMMA);
    waitKey(0);
    return 0;
}
Effect:
The effect with gamma = 4, i.e. the mapping rule is: new pixel = (old pixel)^4, applied on the normalized 0-to-1 range.
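Because the gamma mapping depends only on the input value, the per-pixel pow calls can be replaced by a 256-entry lookup table applied with cv::LUT. A minimal sketch with the same gamma = 4, assuming the same test image:

#include <opencv2/opencv.hpp>
#include <cmath>
using namespace cv;

int main()
{
    Mat src = imread("1.jpg", 1);   // color test image (assumed)
    double gamma = 4;
    // Precompute the gamma curve for all 256 possible input values.
    Mat lookUpTable(1, 256, CV_8U);
    for (int i = 0; i < 256; i++)
        lookUpTable.at<uchar>(0, i) = saturate_cast<uchar>(pow(i / 255.0, gamma) * 255);
    Mat dst;
    LUT(src, lookUpTable, dst);     // applied to every channel
    imshow("Gamma via LUT", dst);
    waitKey(0);
    return 0;
}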
Principle: expand the dynamic range of the image. The mapping rule is the linear function through the points (minimum gray level, 0) and (maximum gray level, 255).
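Written as a formula, with min and max the smallest and largest gray levels present in the image, the mapping implemented below is

new pixel = 255 * (old pixel - min) / (max - min)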
#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat srcImage = imread("1.jpg", 0);
    Mat dstImage_ContrastStretch(srcImage.size(), CV_8U);
    int MinPixel, MaxPixel;
    // Find the minimum gray level.
    // Idea: starting from gray level 0, count how many pixels have each level; the first level with a nonzero count is the minimum gray value in the image.
    for (int pixel = 0; pixel < 256; pixel++)
    {
        int sum = 0;
        for (int row = 0; row < srcImage.rows; row++)
            for (int col = 0; col < srcImage.cols; col++)
            {
                if (srcImage.at<uchar>(row, col) == pixel)
                    sum++;
            }
        if (sum != 0)
        {
            MinPixel = pixel;
            break;
        }
    }
    // Find the maximum gray level.
    // Idea: starting from gray level 255 and counting downwards, the first level with a nonzero count is the maximum gray value in the image.
    for (int pixel = 255; pixel >= 0; pixel--)
    {
        int sum = 0;
        for (int row = 0; row < srcImage.rows; row++)
            for (int col = 0; col < srcImage.cols; col++)
            {
                if (srcImage.at<uchar>(row, col) == pixel)
                    sum++;
            }
        if (sum != 0)
        {
            MaxPixel = pixel;
            break;
        }
    }
    for (int row = 0; row < srcImage.rows; row++)
        for (int col = 0; col < srcImage.cols; col++)
            if (srcImage.at<uchar>(row, col) >= MinPixel && srcImage.at<uchar>(row, col) <= MaxPixel)
                // The mapping rule: stretch [MinPixel, MaxPixel] linearly onto [0, 255] (assumes MaxPixel > MinPixel).
                dstImage_ContrastStretch.at<uchar>(row, col) = 255 * srcImage.at<uchar>(row, col) / (MaxPixel - MinPixel) - 255 * MinPixel / (MaxPixel - MinPixel);
            else
                dstImage_ContrastStretch.at<uchar>(row, col) = 0;
    imshow("Contrast stretching", dstImage_ContrastStretch);
    waitKey(0);
    return 0;
}
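The per-level scan above makes up to 256 passes over the image; cv::minMaxLoc finds both extremes in a single pass, and convertTo (or normalize with NORM_MINMAX) then performs the same linear stretch. A minimal sketch, assuming the same test image:

#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat src = imread("1.jpg", 0);   // grayscale test image (assumed)
    double minVal, maxVal;
    minMaxLoc(src, &minVal, &maxVal);   // one pass over the image
    Mat dst;
    // Linearly map [minVal, maxVal] onto [0, 255] (assumes the image is not constant).
    src.convertTo(dst, CV_8U, 255.0 / (maxVal - minVal), -255.0 * minVal / (maxVal - minVal));
    imshow("Contrast stretch via convertTo", dst);
    waitKey(0);
    return 0;
}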
Principle: display a particular range of gray levels with a specific color.
#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat srcImage = imread("1.jpg", 0);
    Mat dstImage_GrayLayer(srcImage.size(), CV_8U);
    for (int row = 0; row < srcImage.rows; row++)
        for (int col = 0; col < srcImage.cols; col++)
            if (srcImage.at<uchar>(row, col) >= 120 && srcImage.at<uchar>(row, col) <= 180)
                dstImage_GrayLayer.at<uchar>(row, col) = 255;   // highlight the selected range in white
            else
                dstImage_GrayLayer.at<uchar>(row, col) = srcImage.at<uchar>(row, col); // keep other pixels unchanged
    imshow("Gray-level slicing", dstImage_GrayLayer);
    waitKey(0);
    return 0;
}
Effect:
This program replaces pixels whose gray level lies between 120 and 180 with gray level 255 (pure white).
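The same slicing can also be expressed with inRange and setTo. A minimal sketch, assuming the same test image and the same 120-to-180 range:

#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat src = imread("1.jpg", 0);   // grayscale test image (assumed)
    Mat mask;
    inRange(src, Scalar(120), Scalar(180), mask);   // 255 where 120 <= pixel <= 180, 0 elsewhere
    Mat dst = src.clone();
    dst.setTo(Scalar(255), mask);                   // paint the selected range white, keep the rest
    imshow("Gray-level slicing via inRange", dst);
    waitKey(0);
    return 0;
}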
Principle: image gray levels run from 0 to 255, so in binary every pixel consists of 8 bits. Split the 8 bits of each pixel into 8 planes; in a given plane, pixels whose corresponding bit is 1 are assigned gray level 255 (pure white), and pixels whose bit is 0 are assigned gray level 0 (pure black).
#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat srcImage = imread("1.jpg", 0);
    Mat dstImage_BitLayer1(srcImage.size(), CV_8U);
    Mat dstImage_BitLayer2(srcImage.size(), CV_8U);
    Mat dstImage_BitLayer3(srcImage.size(), CV_8U);
    Mat dstImage_BitLayer4(srcImage.size(), CV_8U);
    Mat dstImage_BitLayer5(srcImage.size(), CV_8U);
    Mat dstImage_BitLayer6(srcImage.size(), CV_8U);
    Mat dstImage_BitLayer7(srcImage.size(), CV_8U);
    Mat dstImage_BitLayer8(srcImage.size(), CV_8U);
    for (int row = 0; row < srcImage.rows; row++)
        for (int col = 0; col < srcImage.cols; col++)
        {
            // Plane 1, the least significant bit
            if (srcImage.at<uchar>(row, col) & 1)
                dstImage_BitLayer1.at<uchar>(row, col) = 255;
            else
                dstImage_BitLayer1.at<uchar>(row, col) = 0;
            // Plane 2
            if (srcImage.at<uchar>(row, col) & 2)
                dstImage_BitLayer2.at<uchar>(row, col) = 255;
            else
                dstImage_BitLayer2.at<uchar>(row, col) = 0;
            // Plane 3
            if (srcImage.at<uchar>(row, col) & 4)
                dstImage_BitLayer3.at<uchar>(row, col) = 255;
            else
                dstImage_BitLayer3.at<uchar>(row, col) = 0;
            // Plane 4
            if (srcImage.at<uchar>(row, col) & 8)
                dstImage_BitLayer4.at<uchar>(row, col) = 255;
            else
                dstImage_BitLayer4.at<uchar>(row, col) = 0;
            // Plane 5
            if (srcImage.at<uchar>(row, col) & 16)
                dstImage_BitLayer5.at<uchar>(row, col) = 255;
            else
                dstImage_BitLayer5.at<uchar>(row, col) = 0;
            // Plane 6
            if (srcImage.at<uchar>(row, col) & 32)
                dstImage_BitLayer6.at<uchar>(row, col) = 255;
            else
                dstImage_BitLayer6.at<uchar>(row, col) = 0;
            // Plane 7
            if (srcImage.at<uchar>(row, col) & 64)
                dstImage_BitLayer7.at<uchar>(row, col) = 255;
            else
                dstImage_BitLayer7.at<uchar>(row, col) = 0;
            // Plane 8, the most significant bit
            if (srcImage.at<uchar>(row, col) & 128)
                dstImage_BitLayer8.at<uchar>(row, col) = 255;
            else
                dstImage_BitLayer8.at<uchar>(row, col) = 0;
        }
    imshow("Bit plane 1 (lowest)", dstImage_BitLayer1);
    imshow("Bit plane 2", dstImage_BitLayer2);
    imshow("Bit plane 3", dstImage_BitLayer3);
    imshow("Bit plane 4", dstImage_BitLayer4);
    imshow("Bit plane 5", dstImage_BitLayer5);
    imshow("Bit plane 6", dstImage_BitLayer6);
    imshow("Bit plane 7", dstImage_BitLayer7);
    imshow("Bit plane 8", dstImage_BitLayer8);
    waitKey(0);
    return 0;
}
Plane 1 (the lowest)
Plane 2
Plane 3
Plane 4
Plane 5
Plane 6
Plane 7
Plane 8
It can be seen that the last few (highest-order) planes carry most of the detail of the original image, so an image can be compressed by reducing the number of bit planes that are kept.
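Since the eight planes differ only in the bit being tested, the repeated blocks in the code above can be collapsed into a loop over the bit index. A minimal sketch of the same idea, assuming the same test image:

#include <opencv2/opencv.hpp>
#include <string>
using namespace cv;

int main()
{
    Mat src = imread("1.jpg", 0);   // grayscale test image (assumed)
    for (int bit = 0; bit < 8; bit++)
    {
        Mat plane(src.size(), CV_8U);
        for (int row = 0; row < src.rows; row++)
            for (int col = 0; col < src.cols; col++)
                // Test bit 'bit' of the pixel: white if set, black if not.
                plane.at<uchar>(row, col) = (src.at<uchar>(row, col) & (1 << bit)) ? 255 : 0;
        imshow("Bit plane " + std::to_string(bit + 1), plane);
    }
    waitKey(0);
    return 0;
}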
Principle: image sharpening highlights the places where the gray level changes strongly. For the first derivative, a steadily changing gray-level ramp produces a wide, nonzero constant response, so sharpening with it yields edges that are too wide and look poor. The second derivative, in contrast, produces only a pair of responses separated by zero at the start and end of the ramp and is zero everywhere else, so the resulting edges are narrow. For a two-dimensional image we need the second partial derivatives, so the Laplacian operator is used, i.e. the sum of the second derivatives in the x and y directions. Because a digital image is discrete, the sum of the second differences in the x and y directions is used instead, and it is expressed as a matrix (a kernel).
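The discrete form used in the code below is the 8-neighbor Laplacian kernel (center weight -8, all eight neighbors weighted 1):

 1  1  1
 1 -8  1
 1  1  1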
Note: because the Laplacian kernel contains negative values while displayed gray levels must be non-negative, the source image is first represented in floating point and normalized to the range 0 to 1 (to keep the sharpened result from displaying as almost entirely white). After the floating-point image has been filtered with the Laplacian, saturate_cast would clip negative values to 0 and make the result look dark; to avoid this, the filtered response is subtracted from the floating-point source image, which gives the correct sharpening effect.
// Note: the filtering must be done in floating point.
#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat srcImage = imread("1.jpg", 0);
    Mat dstImage_LaplaceSharpen = Mat::zeros(srcImage.size(), CV_32F);
    Mat srcImage_float;
    srcImage.convertTo(srcImage_float, CV_32F); // convert the input image to float, stored in srcImage_float
    // Normalize the input pixel values to the range 0 to 1.
    for (int row = 0; row < srcImage.rows; row++)
        for (int col = 0; col < srcImage.cols; col++)
            srcImage_float.at<float>(row, col) = srcImage_float.at<float>(row, col) / 255;
    // Laplacian sharpening.
    for (int row = 1; row < srcImage.rows - 1; row++)
        for (int col = 1; col < srcImage.cols - 1; col++)
        {
            // Sum of the 8 neighbors minus 8 times the center: the discrete Laplacian response.
            dstImage_LaplaceSharpen.at<float>(row, col) = saturate_cast<float>(
                srcImage_float.at<float>(row - 1, col - 1) + srcImage_float.at<float>(row - 1, col) +
                srcImage_float.at<float>(row - 1, col + 1) + srcImage_float.at<float>(row, col - 1) +
                srcImage_float.at<float>(row, col + 1) + srcImage_float.at<float>(row + 1, col - 1) +
                srcImage_float.at<float>(row + 1, col) + srcImage_float.at<float>(row + 1, col + 1) -
                8 * srcImage_float.at<float>(row, col));
            // Subtract the Laplacian response from the original to sharpen.
            dstImage_LaplaceSharpen.at<float>(row, col) = srcImage_float.at<float>(row, col) - dstImage_LaplaceSharpen.at<float>(row, col);
        }
    imshow("Laplacian sharpening", dstImage_LaplaceSharpen);
    waitKey(0);
    return 0;
}
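For comparison, the same 8-neighbor kernel can be applied with cv::filter2D, which also takes care of the border pixels. A minimal sketch of the sharpening step under the same 0-to-1 normalization, assuming the same test image:

#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat src = imread("1.jpg", 0);               // grayscale test image (assumed)
    Mat srcF;
    src.convertTo(srcF, CV_32F, 1.0 / 255.0);   // float image in [0, 1]
    // 8-neighbor Laplacian kernel, matching the hand-written loop above.
    Mat kernel = (Mat_<float>(3, 3) << 1, 1, 1,
                                       1, -8, 1,
                                       1, 1, 1);
    Mat lap;
    filter2D(srcF, lap, CV_32F, kernel);
    Mat sharpened = srcF - lap;                 // subtract the Laplacian response to sharpen
    imshow("Laplacian sharpening via filter2D", sharpened);
    waitKey(0);
    return 0;
}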