Normal Mapping Without Precomputed Tangent Space

Let me post the shader first; I'll write up the principle properly when I have time. The short version: instead of per-vertex precomputed tangents, the fragment shader rebuilds the tangent frame per pixel from the screen-space derivatives of the view-space position and the UVs (see the sketch after the code).

// vertex shader

varying vec3 Vertex_UV;
varying vec3 Vertex_Normal;
varying vec3 Vertex_LightDir;
varying vec3 Vertex_EyeVec;

void main(void)
{
    gl_Position   = gl_ModelViewProjectionMatrix * gl_Vertex;
    Vertex_UV     = gl_MultiTexCoord0.xyz;
    Vertex_Normal = gl_NormalMatrix * gl_Normal;

    // everything below is in view space; assumes a positional light
    vec4 view_vertex = gl_ModelViewMatrix * gl_Vertex;
    Vertex_LightDir  = (gl_LightSource[0].position - view_vertex).xyz;
    Vertex_EyeVec    = (-view_vertex).xyz; // vertex-to-eye vector
}

// fragment shader

//uniform sampler2D tex0; // color map
uniform sampler2D normalMap; // normal map

varying vec3 Vertex_UV;
varying vec3 Vertex_Normal;
varying vec3 Vertex_LightDir;
varying vec3 Vertex_EyeVec;

mat3 cotangent_frame(vec3 N, vec3 p, vec2 uv)
{
    // get edge vectors of the pixel triangle
    vec3 dp1  = dFdx(p);
    vec3 dp2  = dFdy(p);
    vec2 duv1 = dFdx(uv);
    vec2 duv2 = dFdy(uv);

    // solve the linear system
    vec3 dp2perp = cross(dp2, N);
    vec3 dp1perp = cross(N, dp1);
    vec3 T = dp2perp * duv1.x + dp1perp * duv2.x;
    vec3 B = dp2perp * duv1.y + dp1perp * duv2.y;

    // construct a scale-invariant frame
    float invmax = inversesqrt(max(dot(T, T), dot(B, B)));
    return mat3(T * invmax, B * invmax, N);
}
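
/*
 * Why the cross products solve the system: dp2perp is perpendicular
 * to dp2 and dp1perp to dp1, so the T and B built above satisfy
 *     dot(T, dp1) = D * duv1.x    dot(T, dp2) = D * duv2.x
 *     dot(B, dp1) = D * duv1.y    dot(B, dp2) = D * duv2.y
 * with D = dot(dp1, cross(dp2, N)). Up to that common scale D, which
 * the normalization above cancels, T and B are the view-space
 * gradients of u and v (the "cotangent" frame of the name).
 */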



vec3 perturb_normal(vec3 N, vec3 V, vec2 texcoord)
{
    // N is the interpolated vertex normal and
    // V is the view vector (vertex to eye)
    vec3 map = texture2D(normalMap, texcoord).xyz;
    map = map * 255./127. - 128./127.; // remap [0,1] samples to [-1,1]
    mat3 TBN = cotangent_frame(N, -V, texcoord);
    return normalize(TBN * map);
}
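
/*
 * Note: the 255./127. - 128./127. remap assumes the usual unsigned
 * normal-map encoding, where a byte value of 128 means zero and 255
 * means +1. If your map was baked with the green channel inverted,
 * flip map.y after the remap; both are baking-tool conventions, not
 * requirements of this technique.
 */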



void main(void)
{
    vec2 uv = Vertex_UV.xy;

    vec3 N  = normalize(Vertex_Normal);
    vec3 L  = normalize(Vertex_LightDir);
    vec3 V  = normalize(Vertex_EyeVec);
    vec3 PN = perturb_normal(N, V, uv);

    vec4 intensity = vec4(0.0, 0.0, 0.0, 0.0); // final color

    // Blinn-Phong shading with the perturbed normal
    vec3 hDir    = normalize(V + L);
    float NdotL  = max(dot(PN, L),    0.0);
    float NdotHV = max(dot(PN, hDir), 0.0);

    intensity += gl_LightSource[0].ambient * 0.5;
    intensity += gl_LightSource[0].diffuse * NdotL * 0.3;

    if (NdotL != 0.0)
        intensity += gl_LightSource[0].specular * NdotL * pow(NdotHV, 30.0);

    gl_FragColor = intensity;
}
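
Since the shader is dense, here is a short sketch of the principle behind cotangent_frame. Inside each pixel quad, dFdx/dFdy give the screen-space differentials dp1, dp2 of the view-space position and duv1, duv2 of the texture coordinates. Because cross(dp2, N) is perpendicular to dp2 and cross(N, dp1) to dp1, the T and B built in the shader satisfy

$$ T \cdot dp_i = D\, du_i, \qquad B \cdot dp_i = D\, dv_i, \qquad D = \det(dp_1, dp_2, N), $$

so up to the common scale D (cancelled by the scale-invariant normalization), T and B are the view-space gradients of u and v. That dual ("cotangent") basis is exactly what a tangent-space normal should be transformed by, since normals transform with the inverse-transpose; no division and no per-vertex tangent attribute are needed.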

 

 

Recently I've been working on subsurface scattering and found that normal mapping simply wouldn't show through: SSS is in effect a smoothing of the surface, so any detail you add gets blurred away with it.

My later idea was to apply the perturbed normal directly on top of the final SSS result. The catch is that the normal map then has to be strengthened, because detail layered onto the finished image reads noticeably weaker.
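
For reference, one simple way to strengthen the map is to scale the tangent-space xy components of the sampled normal before renormalizing. A minimal sketch, reusing cotangent_frame from above; the bumpScale uniform is a hypothetical strength control, not part of the shader above:

uniform float bumpScale; // hypothetical, e.g. 2.0 to exaggerate detail

vec3 perturb_normal_scaled(vec3 N, vec3 V, vec2 texcoord)
{
    vec3 map = texture2D(normalMap, texcoord).xyz * 255./127. - 128./127.;
    map.xy *= bumpScale;   // amplify the tangent-plane components
    map = normalize(map);  // renormalize after scaling
    mat3 TBN = cotangent_frame(N, -V, texcoord);
    return normalize(TBN * map);
}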
