The idea behind the bilateral filter is to suppress pixels whose values differ too much from the center pixel; put simply, it smooths the image while preserving edges.
The bilateral filter takes both the spatial domain and the range (intensity) domain into account. The basic procedure is as follows.
For each point of the template (kernel):
1. In the spatial domain, compute the distance between the template point and the target point (via the Pythagorean theorem), then derive the spatial weight from it.
2. In the range domain, compute the absolute difference between the template point's value and the target point's value, then derive the range weight from it.
3. Multiply the spatial weight by the range weight to get that point's combined weight w, and accumulate all the w values into sumw.
4. At the same time, multiply each template point's pixel value by its w and accumulate the products into sum_pixel.
5. Finally, divide sum_pixel by sumw to obtain the target point's final pixel value.
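Written out, these steps implement the standard bilateral-filter weight and output. The formula below is a reconstruction using the usual convention assumed throughout this post: σd is the spatial sigma, σr the range sigma, f the input image and g the output.

$$w(i,j,k,l) = \exp\!\left(-\frac{(i-k)^2 + (j-l)^2}{2\sigma_d^2} - \frac{\lVert f(i,j) - f(k,l)\rVert^2}{2\sigma_r^2}\right)$$

$$g(k,l) = \frac{\sum_{i,j} f(i,j)\,w(i,j,k,l)}{\sum_{i,j} w(i,j,k,l)}$$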
Here (i, j) ranges over the points of the template, and (k, l) is the pixel being computed, i.e. the center (anchor) point of the template.
/*
The sigma_color parameter is σr in the formula above, and sigma_space is σd.
*/
#pragma once
#include "core/core.hpp" //3个opencv库的头文件
#include "highgui/highgui.hpp"
#include "imgproc/imgproc.hpp"
#include
using namespace cv;
static void bilateralFilter_8u(const Mat& src, Mat& dst, int d, double sigma_color, double sigma_space, int borderType)
{
int cn = src.channels();// number of channels in the image
int i, j, k, maxk, radius;
Size size = src.size();
CV_Assert((src.type() == CV_8UC1 || src.type() == CV_8UC3) &&
src.type() == dst.type() && src.size() == dst.size() &&
src.data != dst.data);
if (sigma_color <= 0)
sigma_color = 1;
if (sigma_space <= 0)
sigma_space = 1;
double gauss_color_coeff = -0.5 / (sigma_color*sigma_color);// exponent coefficient of the range (color) Gaussian
double gauss_space_coeff = -0.5 / (sigma_space*sigma_space);// exponent coefficient of the spatial Gaussian
if (d <= 0)
radius = cvRound(sigma_space*1.5);// round to the nearest integer
else
radius = d / 2;
radius = MAX(radius, 1);
d = radius * 2 + 1;
Mat temp;
copyMakeBorder(src, temp, radius, radius, radius, radius, borderType);
/*Pad the source image with a border of width radius on each side
so that the kernel can be applied at the image boundary.*/
vector<float> _color_weight(cn * 256);// range weights, indexed by intensity difference
vector<float> _space_weight(d*d);// spatial weights, one per template point
vector<int> _space_ofs(d*d);// byte offsets from each template point to the anchor (center) point
float* color_weight = &_color_weight[0];
float* space_weight = &_space_weight[0];
int* space_ofs = &_space_ofs[0];
// initialize color-related bilateral filter coefficients (block 1)
// sigma_color is known, so the range weight for every possible difference (0..255 per channel) can be precomputed.
for (i = 0; i < 256 * cn; i++)
color_weight[i] = (float)std::exp(i*i*gauss_color_coeff);
// initialize space-related bilateral filter coefficients (block 2)
// sigma_space is known, so once the template is fixed the spatial weights can be precomputed as well.
for (i = -radius, maxk = 0; i <= radius; i++)
for (j = -radius; j <= radius; j++)
{
double r = std::sqrt((double)i*i + (double)j*j);
/*if (r > radius)
continue;*/
space_weight[maxk] = (float)std::exp(r*r*gauss_space_coeff);
space_ofs[maxk++] = (int)(i*temp.step + j*cn);
}
for (i = 0; i < size.height; i++)
{
const uchar* sptr = temp.data + (i + radius)*temp.step + radius*cn;
uchar* dptr = dst.data + i*dst.step;
/// grayscale image
if (cn == 1)
{
for (j = 0; j < size.width; j++)
{
float sum = 0, wsum = 0;
int val0 = sptr[j];
for (k = 0; k < maxk; k++)
{
int val = sptr[j + space_ofs[k]];
float w = space_weight[k] * color_weight[std::abs(val - val0)];
sum += val*w;
wsum += w;
}
// overflow is not possible here => there is no need to use CV_CAST_8U
dptr[j] = (uchar)cvRound(sum / wsum);
}
}
else
{
// color image
assert(cn == 3);
for (j = 0; j < size.width * 3; j += 3)
{
float sum_b = 0, sum_g = 0, sum_r = 0, wsum = 0;
int b0 = sptr[j], g0 = sptr[j + 1], r0 = sptr[j + 2];
for (k = 0; k < maxk; k++)
{
const uchar* sptr_k = sptr + j + space_ofs[k];
int b = sptr_k[0], g = sptr_k[1], r = sptr_k[2];
float w = space_weight[k] * color_weight[std::abs(b - b0) +
std::abs(g - g0) + std::abs(r - r0)];
sum_b += b*w; sum_g += g*w; sum_r += r*w;
wsum += w;
}
wsum = 1.f / wsum;
b0 = cvRound(sum_b*wsum);
g0 = cvRound(sum_g*wsum);
r0 = cvRound(sum_r*wsum);
dptr[j] = (uchar)b0; dptr[j + 1] = (uchar)g0; dptr[j + 2] = (uchar)r0;
}
}
}
}
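A minimal usage sketch of the function above. The file name, the parameter values, and the assumption that bilateralFilter_8u is visible in this translation unit are all illustrative, not part of the original code:

int main()
{
    Mat src = imread("test.jpg");        // hypothetical 8-bit, 1- or 3-channel input
    if (src.empty()) return -1;
    Mat dst(src.size(), src.type());     // dst must be pre-allocated with the same size and type as src
    bilateralFilter_8u(src, dst, 9, 30.0, 5.0, BORDER_REFLECT_101);
    imwrite("bilateral_8u.jpg", dst);
    return 0;
}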
The Gaussian distance (spatial) weight is also affected by the template size (space_weight[maxk] = (float)std::exp(r*r*gauss_space_coeff);), so it is constrained to some extent, but the Gaussian similarity (range) weight has no such constraint.
To constrain the similarity weight as well, letting it adapt to the image instead of being specified by hand, we compute it from:
sigma_color = ((sum of the squares of all pixel values in the template) * (number of points in the template) - (sum of all pixel values in the template)^2) / (number of points in the template)^2
which gives sigma_color:
double sigma_color= ((sumsqr*n) - sum*sum) / ((double)(n*n));
Then compute gauss_color_coeff:
double gauss_color_coeff = -0.5 / (double)(sigma_color*sigma_color);
Finally, from the difference between each template point and the point being processed, the similarity weight is computed as:
colorr_Weight = (double)std::exp((std::abs(r - r0))*(std::abs(r - r0))*gauss_colorr_coeff);
(This differs from the OpenCV source implementation in its details, but the underlying idea is the same.)
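In formula form, the quantity computed above for sigma_color is the biased sample variance of the pixel values inside the template (with v_p the pixel values and n the number of template points):

$$\sigma_{color} = \frac{n\sum_{p \in \Omega} v_p^2 - \left(\sum_{p \in \Omega} v_p\right)^2}{n^2} = \frac{1}{n}\sum_{p \in \Omega} v_p^2 - \left(\frac{1}{n}\sum_{p \in \Omega} v_p\right)^2$$

So flat regions yield a small sigma_color (clamped below at 0.01 in the code) and textured regions a large one (clamped above at maxSigma_color).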
//compute the adaptive exponent coefficient for the range (color) Gaussian
double adaptive_gausscolor(float sum, float sumsqr, int n, double maxSigma_color)
{
double sigma_color= ((sumsqr*n) - sum*sum) / ((double)(n*n));
if (sigma_color < 0.01)sigma_color = 0.01;
else
if (sigma_color >(double)maxSigma_color)
sigma_color = (double)maxSigma_color;
double gauss_color_coeff = -0.5 / (double)(sigma_color*sigma_color);
return gauss_color_coeff;
}
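A quick illustration of how the coefficient behaves; the patch values and the cap of 30.0 are made-up numbers for the example:

// flat 3x3 patch, all pixels = 100: variance is 0, clamped up to 0.01 -> coefficient about -5000
double flat  = adaptive_gausscolor(9 * 100.0f, 9 * 100.0f * 100.0f, 9, 30.0);
// high-contrast 3x3 patch, four pixels = 255 and five = 0: variance is huge, clamped down to 30 -> coefficient about -0.00056
double noisy = adaptive_gausscolor(4 * 255.0f, 4 * 255.0f * 255.0f, 9, 30.0);
// |flat| >> |noisy|: in flat regions even a small intensity difference gets an almost-zero range weight,
// while in high-contrast regions the range weight decays much more slowly.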
The adaptive bilateral filter algorithm:
void adaptivebilateralFilter(Mat& src, Mat& dst, int d, double sigma_space, double sigmacolor_max, int borderType)
{
int cn = src.channels();// number of channels in the image
int i, j, k, maxk, radius;
Size size = src.size();
CV_Assert((src.type() == CV_8UC1 || src.type() == CV_8UC3) &&
src.type() == dst.type() && src.size() == dst.size() &&
src.data != dst.data);
if (sigma_space <= 0)
sigma_space = 1;
CV_Assert(d & 1);//确保为奇数
double gauss_space_coeff = -0.5 / (sigma_space*sigma_space);
if (d <= 0)
radius = cvRound(sigma_space*1.5);// round to the nearest integer
else
radius = d / 2;
radius = MAX(radius, 1);
d = radius * 2 + 1;
Mat temp;
copyMakeBorder(src, temp, radius, radius, radius, radius, borderType);
/*Pad the source image with a border of width radius on each side
so that the kernel can be applied at the image boundary.*/
vector<float> _space_weight(d*d);
vector<int> _space_ofs(d*d);
float* space_weight = &_space_weight[0];
int* space_ofs = &_space_ofs[0];
// color-related bilateral filter coefficients (block 1) cannot be initialized here:
// sigma_color is not given, so the range weights for differences 0..255 cannot be tabulated in advance.
/*for (i = 0; i < 256 * cn; i++)
color_weight[i] = (float)std::exp(i*i*gauss_color_coeff);*/
// initialize space-related bilateral filter coefficients
// sigma_space is given, so the spatial weights can still be precomputed once the template is fixed.
for (i = -radius, maxk = 0; i <= radius; i++)
for (j = -radius; j <= radius; j++)
{
double r = std::sqrt((double)i*i + (double)j*j);
space_weight[maxk] = (float)std::exp(r*r*gauss_space_coeff);
space_ofs[maxk++] = (int)(i*temp.step + j*cn);
}
for (i = 0; i < size.height; i++)
{
const uchar* sptr = temp.data + (i + radius)*temp.step + radius*cn;
uchar* dptr = dst.data + i*dst.step;
if (cn == 1)
{
for (j = 0; j < size.width; j++)
{
float sum = 0, wsum = 0, sumValsqr = 0;
int val0 = sptr[j];
// obtain the adaptive range sigma for this template, then compute the similarity weights
for (k = 0; k < maxk; k++)
{
int val = sptr[j + space_ofs[k]];
sum += val;
sumValsqr += (val*val);
}
double gauss_color_coeff=adaptive_gausscolor(sum, sumValsqr, maxk, sigmacolor_max);
sum = 0;
for (k = 0; k < maxk; k++)
{
int val = sptr[j + space_ofs[k]];
int temp = std::abs(val - val0);
float color_Weight =(float)std::exp((float)temp*temp*gauss_color_coeff);
float w = space_weight[k] * color_Weight;
sum += val*w;
wsum += w;
}
// overflow is not possible here => there is no need to use CV_CAST_8U
dptr[j] = (uchar)cvRound(sum / wsum);
}
}
else
{
assert(cn == 3);
for (j = 0; j < size.width * 3; j += 3)
{
float sum_b = 0, sum_g = 0, sum_r = 0, wbsum = 0, wgsum = 0, wrsum = 0, sum_bsqr = 0, sum_gsqr = 0, sum_rsqr = 0;
int b0 = sptr[j], g0 = sptr[j + 1], r0 = sptr[j + 2];
// obtain the adaptive range sigma for each channel of this template, then compute the similarity weights
for (k = 0; k < maxk; k++)
{
const uchar* sptr_k = sptr + j + space_ofs[k];
int b = sptr_k[0], g = sptr_k[1], r = sptr_k[2];
sum_b += b; sum_g += g; sum_r += r;
sum_bsqr += b*b; sum_gsqr += g*g; sum_rsqr += r*r;
}
double gauss_colorb_coeff=adaptive_gausscolor(sum_b, sum_bsqr, maxk, sigmacolor_max);
double gauss_colorg_coeff=adaptive_gausscolor(sum_g, sum_gsqr, maxk, sigmacolor_max);
double gauss_colorr_coeff=adaptive_gausscolor(sum_r, sum_rsqr, maxk, sigmacolor_max);
sum_b = 0; sum_g = 0; sum_r = 0;
for (k = 0; k < maxk; k++)
{
const uchar* sptr_k = sptr + j + space_ofs[k];
int b = sptr_k[0], g = sptr_k[1], r = sptr_k[2];
double colorb_Weight = (double)std::exp((std::abs(b - b0))*(std::abs(b - b0))*gauss_colorb_coeff);
double colorg_Weight = (double)std::exp((std::abs(g - g0))*(std::abs(g - g0))*gauss_colorg_coeff);
double colorr_Weight = (double)std::exp((std::abs(r - r0))*(std::abs(r - r0))*gauss_colorr_coeff);
float wb = space_weight[k] * colorb_Weight;
float wg= space_weight[k] * colorg_Weight;
float wr=space_weight[k] * colorr_Weight;
sum_b += b*wb; sum_g += g*wg; sum_r += r*wr;
wbsum += wb;
wgsum += wg;
wrsum += wr;
}
wbsum = 1.f / wbsum;
wgsum = 1.f / wgsum;
wrsum = 1.f / wrsum;
b0 = cvRound(sum_b*wbsum);
g0 = cvRound(sum_g*wgsum);
r0 = cvRound(sum_r*wrsum);
dptr[j] = (uchar)b0; dptr[j + 1] = (uchar)g0; dptr[j + 2] = (uchar)r0;
}
}
}
}
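And a matching call for the adaptive version. Again the file name and parameter values are only illustrative; note that d must be odd and dst pre-allocated:

Mat src = imread("test.jpg");        // hypothetical input image
Mat dst(src.size(), src.type());     // pre-allocated output of the same size and type
adaptivebilateralFilter(src, dst, 9, 5.0, 30.0, BORDER_REFLECT_101);
imwrite("adaptive_bilateral.jpg", dst);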