Unity3D---Implementing a Beauty Filter with Shaders

 

A video app without a beauty filter? Probably nobody would use it.

With that in mind, on something of a whim, I decided to implement a simple beauty filter in Unity.

A real commercial-grade beauty pipeline is complex and has been polished over countless iterations, covering skin smoothing, whitening, face slimming, eye enlargement and more. Here I'll only use Unity3D post-processing to do simple skin smoothing and whitening.

First, a beauty filter normally targets the face, so we need to locate the face region.

A full face-detection algorithm is hard to implement in Unity3D without an external SDK, so let's change the approach. Thinking about it, we don't really need face detection at all: we can simply detect skin-coloured regions and apply the beautification there.

Shader "Extand/Face/SkinCheck"
{
    Properties
    {
        _MainTex ("Texture", 2D) = "white" {}
    }
    SubShader
    {
        Tags { "RenderType"="Opaque" }
        LOD 100

        Pass
        {
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag

            #include "UnityCG.cginc"

            struct appdata
            {
                float4 vertex : POSITION;
                float2 uv : TEXCOORD0;
            };

            struct v2f
            {
                float2 uv : TEXCOORD0;
                float4 vertex : SV_POSITION;
            };

            sampler2D _MainTex;

            v2f vert (appdata v)
            {
                v2f o;
                o.vertex = UnityObjectToClipPos(v.vertex);
				o.uv = v.uv;
                return o;
            }

			fixed4 check(fixed4 col)
			{
                // Uses the YCbCr colour model; skin tones generally fall inside this Cb/Cr range.
                // An RGB-based test would also work, but in my tests this felt more accurate.
			    half u = (-0.169 * col.r - 0.331 * col.g + 0.5 * col.b + 0.5) * 255;
				half v = (0.5 * col.r - 0.419 * col.g - 0.081 * col.b + 0.5) * 255;

				fixed t1 = saturate(sign(u - 80));
				fixed t2 = saturate(sign(121 - u));
				fixed t3 = saturate(sign(v - 124));
				fixed t4 = saturate(sign(175 - v));

                // t = 1 inside the skin-colour range
				fixed t = sign(t1 * t2 * t3 * t4);

                // Show only the skin region
                //return col * t;

                // Record the skin region in the alpha channel (t = 1 for skin)
				return fixed4(col.rgb, t);
			}

            fixed4 frag (v2f i) : SV_Target
            {
                fixed4 col = tex2D(_MainTex, i.uv);
				return check(col);
            }
            ENDCG
        }

		Pass
        {
		    // Denoise the skin mask using the surrounding samples.
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag

            #include "UnityCG.cginc"

            struct appdata
            {
                float4 vertex : POSITION;
                float2 uv : TEXCOORD0;
            };

            struct v2f
            {
                float2 uv[9] : TEXCOORD0;
                float4 vertex : SV_POSITION;
            };

            sampler2D _MainTex;
			float4 _MainTex_TexelSize; // texel offsets need float precision; fixed is too coarse

            v2f vert (appdata v)
            {
                v2f o;
                o.vertex = UnityObjectToClipPos(v.vertex);

				half size = 1;
				// Fill the 3x3 grid of sample UVs around this pixel (index 4 is the centre).
				for(int m = 0; m < 3; m++)
				{
				    for(int n = 0; n < 3; n++)
				    {
					    float x = _MainTex_TexelSize.x * (n - 1);
						float y = _MainTex_TexelSize.y * (1 - m);
				        o.uv[m*3+n] = v.uv + float2(x, y) * size;
				    }
				}

                return o;
            }

            fixed4 frag (v2f i) : SV_Target
            {
			    fixed4 color = tex2D(_MainTex, i.uv[4]);

                half alpha = 0;

				for(int m = 0; m < 2; m++)
				{
				    for(int n = 0; n < 2; n++)
				    {
					    fixed4 col = tex2D(_MainTex, i.uv[m*3+n]);
						alpha += col.a;
				    }
				}

				half a0 = saturate((alpha - color.a - 0.5) * 10);// 0 when the sampled neighbours are all non-skin (isolated noise pixel)
				half a1 = 1 - saturate((alpha - color.a - 7.5) * 10);// 0 when the sampled neighbours are all skin

				return color * a0 * a1;
				//return fixed4(color.rgb, color.a * a0 * a1);
            }
            ENDCG
        }

		Pass
        {
		    // Denoise: remove small isolated patches of skin colour.
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag

            #include "UnityCG.cginc"

            struct appdata
            {
                float4 vertex : POSITION;
                float2 uv : TEXCOORD0;
            };

            struct v2f
            {
                float2 uv : TEXCOORD0;
                float4 vertex : SV_POSITION;
            };

            sampler2D _MainTex;
			float4 _MainTex_TexelSize; // texel offsets need float precision; fixed is too coarse

            v2f vert (appdata v)
            {
                v2f o;
                o.vertex = UnityObjectToClipPos(v.vertex);
				o.uv = v.uv;
				
                return o;
            }

			// Returns 1 if this pixel looks like part of a large skin area (the face), 0 for small isolated patches.
			fixed isskin(v2f i)
			{
			    float r = min(_ScreenParams.x, _ScreenParams.y);
				r = round(r * 0.2);                // ray length: ~20% of the shorter screen dimension
				int step = max(1, round(r * 0.1)); // sample interval along the ray

				half rate = 1;
				// Cast a ray in each of the four axis directions (+x, +y, -x, -y).
			    for(int m = 0; m < 4; m++)
				{
				    half alpha = 0;
					half count = 0.01; // avoids division by zero

					for(int n = 0; n < r; n += step)
					{
						float x = n * ((m + 1) % 2) * sign(1 - m);
						float y = n * (m % 2) * sign(2 - m);

					    count += 1;
					    alpha += tex2D(_MainTex, i.uv + float2(x * _MainTex_TexelSize.x, y * _MainTex_TexelSize.y)).a;
					}

					// If roughly 90% of the samples along this ray are skin, treat the area as part of the face.
				    rate = rate * saturate((0.9 - alpha / count) * 1000);
				}

				return 1 - rate;
			}


            fixed4 frag (v2f i) : SV_Target
            {
			    fixed4 color = tex2D(_MainTex, i.uv);
				
				return color * color.a * isskin(i);
				//return fixed4(color.rgb, color.a * rate);
			
            }
            ENDCG
        }
    }
}

In the end we find that the skin-colour test can roughly pick out the skin region, but it leaves noise (points in the environment with skin-like colours get mis-detected). We could keep optimising to remove more of it, or simply let it be: this is only a mask, and a few stray environment pixels getting beautified does no harm.

Next comes skin smoothing. Smoothing is, at its core, a blur, used to hide blemishes and spots.

There are plenty of blur algorithms. I mainly use a bilateral filter (an edge-preserving filter), with a Gaussian blur as a helper. Compared with an ordinary blur, a bilateral filter smooths and denoises while still keeping some edge detail; for the full derivation, please look it up yourselves (lazy, Orz).
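To make the idea concrete (this is just the standard bilateral weight, matching the sigmas2 / sigmar2 terms in the shader below): for a neighbour at offset $(m, n)$, each colour channel $c$ gets the weight

$$w_c(m,n) = \exp\!\left(-\frac{m^2+n^2}{2\sigma_s^2} - \frac{\big(I_c(p) - I_c(p + (m,n)\,s)\big)^2}{2\sigma_r^2}\right)$$

and the output is the weighted average $\sum w_c I_c / \sum w_c$. Here $s$ is _BlurSize, $\sigma_s$ is _SigmaS and $\sigma_r$ is _SigmaR. A large colour difference across an edge drives the weight toward zero, which is why edges survive the smoothing.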

Let's go straight to the core source code. Simple and crude, right?

Shader "Extand/Effect/BilateralFilters"
{
    Properties
    {
        _MainTex ("Texture", 2D) = "white" {}
		_BlurSize("BlurSize", Range(1,12)) = 1
		_SigmaS("_SigmaS", Range(1,10)) = 5
		_SigmaR("_SigmaR", Range(0.01,1)) = 0.09
    }
    SubShader
    {
        Tags { "RenderType"="Opaque" }
        LOD 100

		Pass
        {
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            
            #include "UnityCG.cginc"

            struct appdata
            {
                float4 vertex : POSITION;
                float2 uv : TEXCOORD0;
            };

            struct v2f
            {
                float2 uv : TEXCOORD0;
                float4 vertex : SV_POSITION;
            };

            sampler2D _MainTex;
            float4 _MainTex_TexelSize;
			float _BlurSize;
			float _SigmaS;
			float _SigmaR;

            v2f vert (appdata v)
            {
                v2f o;
                o.vertex = UnityObjectToClipPos(v.vertex);

				o.uv = v.uv;

                return o;
            }

			// 5x5 bilateral filter: weight each neighbour by spatial distance and by colour
			// difference, so strong edges are preserved while flat areas are smoothed.
			fixed4 bilater(v2f i)
			{
	            half sigmas2 = 2 * _SigmaS * _SigmaS;// spatial term; _SigmaS defaults to 5
				half sigmar2 = 2 * _SigmaR * _SigmaR;// range (colour) term; _SigmaR defaults to 0.09
				half3 numerator = half3(0, 0, 0);
				half3 denominator = half3(0, 0, 0);
				fixed4 col = tex2D(_MainTex, i.uv);

			    for(int m = 0; m < 5; m++)
				{
				    half mSquared = (m - 2) * (m - 2);
				    for(int n = 0; n < 5; n++)
					{
                        // _BlurSize is the blur level: larger values blur more, but also distort the image more.
					    fixed4 tcol = tex2D(_MainTex, i.uv + float2(_MainTex_TexelSize.x * (m - 2), _MainTex_TexelSize.y * (n - 2)) * _BlurSize);

                        fixed4 diff = col - tcol;
			            half nSquared = (n - 2) * (n - 2);
						half w_s = (mSquared + nSquared) / sigmas2;
						// Per-channel weight: spatial Gaussian multiplied by the range Gaussian.
						half3 w = exp(-(w_s + diff.rgb * diff.rgb / sigmar2));
						denominator += w;
						numerator += w * tcol.rgb;
				    }
				}
				return fixed4(numerator / denominator, col.a);
			}

            fixed4 frag (v2f i) : SV_Target
            {
                return bilater(i);
            }
            ENDCG
        }
    }

	FallBack Off
}
Shader "Extand/Effect/GaussBlur"
{
    Properties
    {
        _MainTex ("Texture", 2D) = "white" {}
		_BlurSize("BlurSize", Range(1,20)) = 5
    }
    SubShader
    {
        Tags { "RenderType"="Opaque" }
        LOD 100

        Pass
        {
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            
            #include "UnityCG.cginc"

            struct appdata
            {
                float4 vertex : POSITION;
                float2 uv : TEXCOORD0;
            };

            struct v2f
            {
                float2 uv[5] : TEXCOORD0;
                float4 vertex : SV_POSITION;
            };

            sampler2D _MainTex;
            float4 _MainTex_TexelSize;
			float _BlurSize;

            v2f vert (appdata v)
            {
                v2f o;
                o.vertex = UnityObjectToClipPos(v.vertex);

				o.uv[0] = v.uv;
                // Gaussian blur along the x direction (the second pass does the same along y).
				o.uv[1] = v.uv + float2(_MainTex_TexelSize.x * 1, 0) * _BlurSize;// _BlurSize is the blur level
				o.uv[2] = v.uv - float2(_MainTex_TexelSize.x * 1, 0) * _BlurSize;
				o.uv[3] = v.uv + float2(_MainTex_TexelSize.x * 2, 0) * _BlurSize;
				o.uv[4] = v.uv - float2(_MainTex_TexelSize.x * 2, 0) * _BlurSize;

                return o;
            }

            fixed4 frag (v2f i) : SV_Target
            {
				float weight[3] = {0.4026, 0.2442, 0.0545};

				fixed3 sum = tex2D(_MainTex, i.uv[0]).rgb * weight[0];
				
				for(int m = 1; m < 3; m++)
				{
				    sum += tex2D(_MainTex, i.uv[m * 2 - 1]).rgb * weight[m];
				    sum += tex2D(_MainTex, i.uv[m * 2]).rgb * weight[m];
				}

                return fixed4(sum, 1.0);
            }
            ENDCG
        }


		Pass
        {
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            
            #include "UnityCG.cginc"

            struct appdata
            {
                float4 vertex : POSITION;
                float2 uv : TEXCOORD0;
            };

            struct v2f
            {
                float2 uv[5] : TEXCOORD0;
                float4 vertex : SV_POSITION;
            };

            sampler2D _MainTex;
            float4 _MainTex_TexelSize;
			float _BlurSize;

            v2f vert (appdata v)
            {
                v2f o;
                o.vertex = UnityObjectToClipPos(v.vertex);
                
				o.uv[0] = v.uv;

                // Same Gaussian weights, but the offsets run along the y direction (vertical pass).
				o.uv[1] = v.uv + float2(0, _MainTex_TexelSize.y * 1) * _BlurSize;
				o.uv[2] = v.uv - float2(0, _MainTex_TexelSize.y * 1) * _BlurSize;
				o.uv[3] = v.uv + float2(0, _MainTex_TexelSize.y * 2) * _BlurSize;
				o.uv[4] = v.uv - float2(0, _MainTex_TexelSize.y * 2) * _BlurSize;

                return o;
            }

            fixed4 frag (v2f i) : SV_Target
            {
				float weight[3] = {0.4026, 0.2442, 0.0545};

				fixed3 sum = tex2D(_MainTex, i.uv[0]).rgb * weight[0];
				
				for(int m = 1; m < 3; m++)
				{
				    sum += tex2D(_MainTex, i.uv[m * 2 - 1]).rgb * weight[m];
				    sum += tex2D(_MainTex, i.uv[m * 2]).rgb * weight[m];
				}

                return fixed4(sum, 1.0);
            }
            ENDCG
        }
    }

	FallBack Off
}

Next up: the whitening / brightening algorithm.

I've even been thoughtful enough to dig up an explanation of the principle for you:

[Figure 1: principle of the brightening algorithm]
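In case the image doesn't survive the reposting: the bright() function in the Beauty shader further down applies a simple per-channel log curve,

$$c_{out} = \frac{\log\big(c_{in}\,(L-1)+1\big)}{\log L}, \qquad L = \text{BrightLevel} = 5$$

which keeps 0 at 0 and 1 at 1 but lifts the mid-tones; for example $c_{in} = 0.25$ maps to $\log 2 / \log 5 \approx 0.43$. The _SkinWhite parameter then blends between the original and the brightened colour.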

Those are all the core source-code pieces. We just need to wire them together to get a complete beauty-filter feature (a minimal driver-script sketch follows after the Beauty shader below).

[Figure 2]

Shader "Extand/Face/Beauty"
{
    Properties
    {
        _MainTex ("Texture", 2D) = "white" {}
		_BlurTex ("BlurTex", 2D) = "white" {}
		_GaussTex ("GaussTex", 2D) = "white" {}
		_SkinTex ("SkinTex", 2D) = "white" {}

		_SkinWhite("SkinWhite", Range(0,1)) = 0
    }
    SubShader
    {
        Tags { "RenderType"="Opaque" }
        LOD 100

        Pass
        {
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            
            #include "UnityCG.cginc"

            struct appdata
            {
                float4 vertex : POSITION;
                float2 uv : TEXCOORD0;
            };

            struct v2f
            {
                float2 uv : TEXCOORD0;
                float4 vertex : SV_POSITION;
            };

            sampler2D _MainTex;
			float4 _BlurTex_TexelSize;
			sampler2D _BlurTex;
			sampler2D _GaussTex;
			sampler2D _SkinTex;
			float _SkinWhite;

            v2f vert (appdata v)
            {
                v2f o;
                o.vertex = UnityObjectToClipPos(v.vertex);
				o.uv = v.uv;
                return o;
            }

			// Same skin-colour test as in the SkinCheck shader; not called below, since the mask comes pre-computed in _SkinTex.
			fixed4 skin(fixed4 col)
			{
			    half u = (-0.169 * col.r - 0.331 * col.g + 0.5 * col.b + 0.5) * 255;
				half v = (0.5 * col.r - 0.419 * col.g - 0.081 * col.b + 0.5) * 255;

				fixed t1 = saturate(sign(u - 80));
				fixed t2 = saturate(sign(121 - u));
				fixed t3 = saturate(sign(v - 124));
				fixed t4 = saturate(sign(175 - v));

				fixed t = sign(t1 * t2 * t3 * t4);
				return fixed4(col.r, col.g, col.b, t);
			}

			half luminance(fixed4 color){
			    return 0.2125 * color.r + 0.7154 * color.g + 0.0721 * color.b;
			}

			fixed4 bright(fixed4 col)
			{
				// Brightening: a per-channel log curve; a larger BrightLevel lifts the image more.
				half BrightLevel = 5;
			    half3 temp;
			    temp.x = log(col.r * (BrightLevel - 1) + 1) / log(BrightLevel);
			    temp.y = log(col.g * (BrightLevel - 1) + 1) / log(BrightLevel);
			    temp.z = log(col.b * (BrightLevel - 1) + 1) / log(BrightLevel);
			    return fixed4(temp, col.a);
			}

            fixed4 frag (v2f i) : SV_Target
            {
			    fixed4 col = tex2D(_MainTex, i.uv);      // original image
				fixed4 cskin = tex2D(_SkinTex, i.uv);    // skin mask
				fixed4 bilater = tex2D(_BlurTex, i.uv);  // bilateral-filtered image
				fixed4 gauss = tex2D(_GaussTex, i.uv);   // Gaussian-blurred image
				// The naive plan: bilateral-filter the skin region, then brighten, and we're done.
				// In practice that isn't quite good enough: the bilateral filter preserves edges,
				// but not perfectly, so we add some facial detail back onto the blurred texture.
				// The idea:
				// 1. original = blur + detail  --->  detail = original - blur
				// 2. enhanced = blur + detail * k
				// The coefficients are subjective and were found by trial and error:
				// 0.2 * (col - bilater)   is the detail the bilateral filter removed from the original
				// 0.8 * (bilater - gauss) is the detail the Gaussian blur removed from the bilateral result
				half4 nblur = bilater + 0.2 * (col - bilater) + 0.8 * (bilater - gauss);
				nblur.r = saturate(nblur.r);// clamp to avoid colour overflow
				nblur.g = saturate(nblur.g);
				nblur.b = saturate(nblur.b);
				// Apply the skin mask: blurred value inside the skin region, original elsewhere.
				fixed4 final = lerp(col, fixed4(nblur.rgb, 1), cskin.a);
				// Brighten
				fixed4 cbright = bright(final);
				// Blend by the whitening level
				final = lerp(final, cbright, _SkinWhite);

				final.a = 1;
                return final;
            }

            ENDCG
        }
    }

	FallBack Off
}
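The original post stops at the shaders, so here is a minimal sketch of the driver script that chains them together in the built-in render pipeline (OnRenderImage + Graphics.Blit). The property names (_SkinTex, _BlurTex, _GaussTex, _SkinWhite) and the pass order follow the shaders above; everything else (the class name BeautyEffect, the temporary render textures, leaving _BlurSize / _SigmaS / _SigmaR at their Property defaults) is my own assumption, so treat it as a starting point rather than the exact script behind the article.

using UnityEngine;

// Minimal sketch: chain SkinCheck -> BilateralFilters -> GaussBlur -> Beauty.
// Attach to a Camera and assign the four shaders in the Inspector.
[RequireComponent(typeof(Camera))]
public class BeautyEffect : MonoBehaviour
{
    public Shader skinCheckShader;   // Extand/Face/SkinCheck
    public Shader bilateralShader;   // Extand/Effect/BilateralFilters
    public Shader gaussShader;       // Extand/Effect/GaussBlur
    public Shader beautyShader;      // Extand/Face/Beauty

    [Range(0f, 1f)] public float skinWhite = 0.5f;

    Material skinMat, bilateralMat, gaussMat, beautyMat;

    void OnEnable()
    {
        skinMat      = new Material(skinCheckShader);
        bilateralMat = new Material(bilateralShader);
        gaussMat     = new Material(gaussShader);
        beautyMat    = new Material(beautyShader);
    }

    void OnRenderImage(RenderTexture src, RenderTexture dst)
    {
        int w = src.width, h = src.height;

        // 1. Skin mask: pass 0 detects skin colour, passes 1 and 2 denoise the mask.
        RenderTexture skinA = RenderTexture.GetTemporary(w, h, 0, RenderTextureFormat.ARGB32);
        RenderTexture skinB = RenderTexture.GetTemporary(w, h, 0, RenderTextureFormat.ARGB32);
        Graphics.Blit(src, skinA, skinMat, 0);
        Graphics.Blit(skinA, skinB, skinMat, 1);
        Graphics.Blit(skinB, skinA, skinMat, 2);

        // 2. Bilateral filter (edge-preserving smoothing of the whole frame).
        RenderTexture bilateral = RenderTexture.GetTemporary(w, h, 0);
        Graphics.Blit(src, bilateral, bilateralMat, 0);

        // 3. Gaussian blur of the bilateral result: horizontal pass, then vertical pass.
        RenderTexture gauss = RenderTexture.GetTemporary(w, h, 0);
        RenderTexture gaussTmp = RenderTexture.GetTemporary(w, h, 0);
        Graphics.Blit(bilateral, gaussTmp, gaussMat, 0);
        Graphics.Blit(gaussTmp, gauss, gaussMat, 1);

        // 4. Composite: blend the smoothed skin back over the original using the mask, then brighten.
        beautyMat.SetTexture("_SkinTex", skinA);
        beautyMat.SetTexture("_BlurTex", bilateral);
        beautyMat.SetTexture("_GaussTex", gauss);
        beautyMat.SetFloat("_SkinWhite", skinWhite);
        Graphics.Blit(src, dst, beautyMat, 0);

        RenderTexture.ReleaseTemporary(skinA);
        RenderTexture.ReleaseTemporary(skinB);
        RenderTexture.ReleaseTemporary(bilateral);
        RenderTexture.ReleaseTemporary(gauss);
        RenderTexture.ReleaseTemporary(gaussTmp);
    }

    void OnDisable()
    {
        // Clean up the runtime-created materials.
        Destroy(skinMat); Destroy(bilateralMat); Destroy(gaussMat); Destroy(beautyMat);
    }
}

If performance matters on mobile, the bilateral and Gaussian passes can be run at a reduced resolution before compositing.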

Done, that's a wrap!
