Main functions involved:
Input.GetAxis("Mouse X") returns the mouse's horizontal (x-axis) movement delta.
Input.GetAxis("Mouse Y") returns the mouse's vertical (y-axis) movement delta.
The drag length is computed with the Pythagorean theorem; the longer the drag, the faster the rotation.
The axes can be configured under Project Settings > Input.
The code below should make this clear. (Note that OnMouseDown/OnMouseDrag only fire on a GameObject that has a Collider.)
using UnityEngine;
using System.Collections;

public class StartRotate : MonoBehaviour
{
    private bool onDrag = false;  // is the object being dragged?
    public float speed = 6f;      // rotation speed
    private float tempSpeed;      // damped speed
    private float axisX = 1;      // mouse movement delta along the horizontal axis
    private float axisY = 1;      // mouse movement delta along the vertical axis
    private float cXY;

    void OnMouseDown()
    {
        // mouse button pressed: reset the deltas
        axisX = 0f;
        axisY = 0f;
    }

    void OnMouseDrag() // called while the mouse drags the object
    {
        onDrag = true;
        axisX = -Input.GetAxis("Mouse X"); // mouse deltas (built-in axes, see Project Settings > Input)
        axisY = Input.GetAxis("Mouse Y");
        cXY = Mathf.Sqrt(axisX * axisX + axisY * axisY); // length of the mouse movement
        if (cXY == 0f) { cXY = 1f; }
    }

    float Rigid() // compute the damped speed
    {
        if (onDrag)
        {
            tempSpeed = speed;
        }
        else
        {
            if (tempSpeed > 0)
            {
                // dividing by the drag length makes the speed decay more slowly after a long drag
                tempSpeed -= speed * 2 * Time.deltaTime / cXY;
            }
            else
            {
                tempSpeed = 0;
            }
        }
        return tempSpeed;
    }

    void Update()
    {
        // this.transform.Rotate(new Vector3(axisY, axisX, 0) * Rigid(), Space.World); // alternative: keep rotating slowly in the previous direction, with damping
        if (!Input.GetMouseButton(0))
        {
            onDrag = false;
            this.transform.Rotate(new Vector3(axisY, axisX, 0) * 0.5f, Space.World);
        }
    }
}
https://en.wikibooks.org/wiki/Cg_Programming/Unity/Cookies
One issue remains: the shader casts shadows (the Fallback provides a shadow caster pass) but does not receive them, since neither pass samples Unity's shadow maps.
Shader "Cg per-pixel lighting with cookies" {
Properties {
_Color ("Diffuse Material Color", Color) = (1,1,1,1)
_SpecColor ("Specular Material Color", Color) = (1,1,1,1)
_Shininess ("Shininess", Float) = 10
}
SubShader {
Pass {
Tags { "LightMode" = "ForwardBase" } // pass for ambient light
// and first directional light source without cookie
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
uniform float4 _LightColor0;
// color of light source (from "Lighting.cginc")
// User-specified properties
uniform float4 _Color;
uniform float4 _SpecColor;
uniform float _Shininess;
struct vertexInput {
float4 vertex : POSITION;
float3 normal : NORMAL;
};
struct vertexOutput {
float4 pos : SV_POSITION;
float4 posWorld : TEXCOORD0;
float3 normalDir : TEXCOORD1;
};
vertexOutput vert(vertexInput input)
{
vertexOutput output;
float4x4 modelMatrix = _Object2World;
float4x4 modelMatrixInverse = _World2Object;
output.posWorld = mul(modelMatrix, input.vertex);
output.normalDir = normalize(
mul(float4(input.normal, 0.0), modelMatrixInverse).xyz);
output.pos = mul(UNITY_MATRIX_MVP, input.vertex);
return output;
}
float4 frag(vertexOutput input) : COLOR
{
float3 normalDirection = normalize(input.normalDir);
float3 viewDirection = normalize(
_WorldSpaceCameraPos - input.posWorld.xyz);
float3 lightDirection =
normalize(_WorldSpaceLightPos0.xyz);
float3 ambientLighting =
UNITY_LIGHTMODEL_AMBIENT.rgb * _Color.rgb;
float3 diffuseReflection =
_LightColor0.rgb * _Color.rgb
* max(0.0, dot(normalDirection, lightDirection));
float3 specularReflection;
if (dot(normalDirection, lightDirection) < 0.0)
// light source on the wrong side?
{
specularReflection = float3(0.0, 0.0, 0.0);
// no specular reflection
}
else // light source on the right side
{
specularReflection = _LightColor0.rgb
* _SpecColor.rgb * pow(max(0.0, dot(
reflect(-lightDirection, normalDirection),
viewDirection)), _Shininess);
}
return float4(ambientLighting + diffuseReflection
+ specularReflection, 1.0);
}
ENDCG
}
Pass {
Tags { "LightMode" = "ForwardAdd" }
// pass for additional light sources
Blend One One // additive blending
CGPROGRAM
#pragma multi_compile_lightpass
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
uniform float4 _LightColor0;
// color of light source (from "Lighting.cginc")
uniform float4x4 _LightMatrix0; // transformation
// from world to light space (from Autolight.cginc)
#if defined (DIRECTIONAL_COOKIE) || defined (SPOT)
uniform sampler2D _LightTexture0;
// cookie alpha texture map (from Autolight.cginc)
#elif defined (POINT_COOKIE)
uniform samplerCUBE _LightTexture0;
// cookie alpha texture map (from Autolight.cginc)
#endif
// User-specified properties
uniform float4 _Color;
uniform float4 _SpecColor;
uniform float _Shininess;
struct vertexInput {
float4 vertex : POSITION;
float3 normal : NORMAL;
};
struct vertexOutput {
float4 pos : SV_POSITION;
float4 posWorld : TEXCOORD0;
// position of the vertex (and fragment) in world space
float4 posLight : TEXCOORD1;
// position of the vertex (and fragment) in light space
float3 normalDir : TEXCOORD2;
// surface normal vector in world space
};
vertexOutput vert(vertexInput input)
{
vertexOutput output;
float4x4 modelMatrix = _Object2World;
float4x4 modelMatrixInverse = _World2Object;
output.posWorld = mul(modelMatrix, input.vertex);
output.posLight = mul(_LightMatrix0, output.posWorld);
output.normalDir = normalize(
mul(float4(input.normal, 0.0), modelMatrixInverse).xyz);
output.pos = mul(UNITY_MATRIX_MVP, input.vertex);
return output;
}
float4 frag(vertexOutput input) : COLOR
{
float3 normalDirection = normalize(input.normalDir);
float3 viewDirection = normalize(
_WorldSpaceCameraPos - input.posWorld.xyz);
float3 lightDirection;
float attenuation = 1.0;
// by default no attenuation with distance
#if defined (DIRECTIONAL) || defined (DIRECTIONAL_COOKIE)
lightDirection = normalize(_WorldSpaceLightPos0.xyz);
#elif defined (POINT_NOATT)
lightDirection = normalize(
_WorldSpaceLightPos0 - input.posWorld.xyz);
#elif defined(POINT)||defined(POINT_COOKIE)||defined(SPOT)
float3 vertexToLightSource =
_WorldSpaceLightPos0.xyz - input.posWorld.xyz;
float distance = length(vertexToLightSource);
attenuation = 1.0 / distance; // linear attenuation
lightDirection = normalize(vertexToLightSource);
#endif
float3 diffuseReflection =
attenuation * _LightColor0.rgb * _Color.rgb
* max(0.0, dot(normalDirection, lightDirection));
float3 specularReflection;
if (dot(normalDirection, lightDirection) < 0.0)
// light source on the wrong side?
{
specularReflection = float3(0.0, 0.0, 0.0);
// no specular reflection
}
else // light source on the right side
{
specularReflection = attenuation * _LightColor0.rgb
* _SpecColor.rgb * pow(max(0.0, dot(
reflect(-lightDirection, normalDirection),
viewDirection)), _Shininess);
}
float cookieAttenuation = 1.0;
// by default no cookie attenuation
#if defined (DIRECTIONAL_COOKIE)
cookieAttenuation = tex2D(_LightTexture0,
input.posLight.xy).a;
#elif defined (POINT_COOKIE)
cookieAttenuation = texCUBE(_LightTexture0,
input.posLight.xyz).a;
#elif defined (SPOT)
cookieAttenuation = tex2D(_LightTexture0,
input.posLight.xy / input.posLight.w
+ float2(0.5, 0.5)).a;
#endif
return float4(cookieAttenuation
* (diffuseReflection + specularReflection), 1.0);
}
ENDCG
}
}
Fallback "Specular"
}
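As an aside, a light's cookie can also be assigned from script; a minimal sketch (the cookieTex field is illustrative):

using UnityEngine;

public class SetLightCookie : MonoBehaviour
{
    public Texture cookieTex; // alpha texture (use a Cubemap for point-light cookies)

    void Start()
    {
        Light l = GetComponent<Light>();
        l.cookie = cookieTex; // bound to the passes above as _LightTexture0
    }
}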
Rotating a texture can sometimes produce more varied results and extra detail; for rocks, for example, texture rotation adds more variation.
http://forum.unity3d.com/threads/rotation-of-texture-uvs-directly-from-a-shader.150482/
Shader "Custom/RotateUVs" {
Properties {
_MainTex ("Base (RGB)", 2D) = "white" {}
_RotationSpeed ("Rotation Speed", Float) = 2.0
}
SubShader {
Tags { "RenderType"="Opaque" }
LOD 200
CGPROGRAM
#pragma surface surf Lambert vertex:vert
sampler2D _MainTex;
struct Input {
float2 uv_MainTex;
};
float _RotationSpeed;
void vert (inout appdata_full v) {
float sinX = sin ( _RotationSpeed * _Time.y ); // _Time is a float4; .y is the time in seconds
float cosX = cos ( _RotationSpeed * _Time.y );
float2x2 rotationMatrix = float2x2( cosX, -sinX, sinX, cosX);
v.texcoord.xy = mul ( v.texcoord.xy, rotationMatrix );
}
void surf (Input IN, inout SurfaceOutput o) {
half4 c = tex2D (_MainTex, IN.uv_MainTex);
o.Albedo = c.rgb;
o.Alpha = c.a;
}
ENDCG
}
FallBack "Diffuse"
}
http://forum.unity3d.com/threads/rotating-multiple-textures-in-a-shader.368457/
For Unity there are basically two different ways to do shaders (well, four, but one is effectively deprecated, and the other is only for platform-specific stuff): surface shaders and vert/frag shaders. Behind the scenes Unity converts surface shaders into expanded vert/frag shaders, which then get converted into platform-specific shaders, which then get compiled into the final shader code that is sent to the drivers, which convert that into the final form the GPU actually uses. It's a deep rabbit hole, so we'll stick to the top two levels. The deprecated method is the "fixed function" shaders, if you're curious. This is what shaders used to look like before you could do arbitrary math, but these now just get converted into vert/frag shaders.
So, let's start with vert/frag for now: the basic rotation in the vertex shader.
Shader "Unlit/Unlit UV Rotation in vertex"
{
Properties
{
_MainTex ("Texture", 2D) = "white" {}
_Rotation ("Rotation", Range(0,360)) = 0.0
}
SubShader
{
Tags { "RenderType"="Opaque" }
LOD 100
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
// make fog work
#pragma multi_compile_fog
#include "UnityCG.cginc"
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float2 uv : TEXCOORD0;
UNITY_FOG_COORDS(1)
float4 vertex : SV_POSITION;
};
sampler2D _MainTex;
float4 _MainTex_ST;
float _Rotation;
v2f vert (appdata v)
{
v2f o;
o.vertex = mul(UNITY_MATRIX_MVP, v.vertex);
// rotating UV
const float Deg2Rad = (UNITY_PI * 2.0) / 360.0;
float rotationRadians = _Rotation * Deg2Rad; // convert degrees to radians
float s = sin(rotationRadians); // sin and cos take radians, not degrees
float c = cos(rotationRadians);
float2x2 rotationMatrix = float2x2( c, -s, s, c); // construct simple rotation matrix
v.uv -= 0.5; // offset UV so we rotate around 0.5 and not 0.0
v.uv = mul(rotationMatrix, v.uv); // apply rotation matrix
v.uv += 0.5; // offset UV again so UVs are in the correct location
o.uv = TRANSFORM_TEX(v.uv, _MainTex);
UNITY_TRANSFER_FOG(o,o.vertex);
return o;
}
fixed4 frag (v2f i) : SV_Target
{
// sample the texture
fixed4 col = tex2D(_MainTex, i.uv);
// apply fog
UNITY_APPLY_FOG(i.fogCoord, col);
return col;
}
ENDCG
}
}
}
This is the default "new Shader > Unlit" modified to add rotation. This is probably where you're at now, or at least something similar. The form is a little different from most of the other threads on UV rotation, because most people get confused by the degree-to-radian conversion (i.e. they don't do it) and they do the mul in the wrong order.
So, now we want multiple textures and UV sets with different rotations. This is just a matter of adding additional UVs to the v2f struct and doing the math multiple times.
Shader "Unlit/Unlit UV Rotation of multiple textures in vertex"
{
Properties
{
_MainTex ("Texture", 2D) = "white" {}
_RotatedTexA ("Texture", 2D) = "white" {}
_RotationA ("Rotation", Range(0,360)) = 0.0
_RotatedTexB ("Texture", 2D) = "white" {}
_RotationB ("Rotation", Range(0,360)) = 0.0
}
SubShader
{
Tags { "RenderType"="Opaque" }
LOD 100
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
// make fog work
#pragma multi_compile_fog
#include "UnityCG.cginc"
float2 rotateUV(float2 uv, float degrees)
{
// rotating UV
const float Deg2Rad = (UNITY_PI * 2.0) / 360.0;
float rotationRadians = degrees * Deg2Rad; // convert degrees to radians
float s = sin(rotationRadians); // sin and cos take radians, not degrees
float c = cos(rotationRadians);
float2x2 rotationMatrix = float2x2( c, -s, s, c); // construct simple rotation matrix
uv -= 0.5; // offset UV so we rotate around 0.5 and not 0.0
uv = mul(rotationMatrix, uv); // apply rotation matrix
uv += 0.5; // offset UV again so UVs are in the correct location
return uv;
}
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float2 uv : TEXCOORD0;
float4 uv2 : TEXCOORD1; // Addition additional UV to pass
UNITY_FOG_COORDS(2) // changed from 1 to 2 since uv2 is using TEXCOORD1 now
float4 vertex : SV_POSITION;
};
sampler2D _MainTex;
float4 _MainTex_ST;
sampler2D _RotatedTexA;
float4 _RotatedTexA_ST;
float _RotationA;
sampler2D _RotatedTexB;
float4 _RotatedTexB_ST;
float _RotationB;
v2f vert (appdata v)
{
v2f o;
o.vertex = mul(UNITY_MATRIX_MVP, v.vertex);
o.uv = TRANSFORM_TEX(v.uv, _MainTex);
o.uv2.xy = TRANSFORM_TEX(rotateUV(v.uv, _RotationA), _RotatedTexA);
o.uv2.zw = TRANSFORM_TEX(rotateUV(v.uv, _RotationB), _RotatedTexB);
UNITY_TRANSFER_FOG(o,o.vertex);
return o;
}
fixed4 frag (v2f i) : SV_Target
{
// sample the texture
fixed4 col = tex2D(_MainTex, i.uv);
// sample rotated textures
fixed4 colA = tex2D(_RotatedTexA, i.uv2.xy);
fixed4 colB = tex2D(_RotatedTexB, i.uv2.zw);
// adding the textures together just so you can see them all
col = (col + colA + colB) / 3.0;
// apply fog
UNITY_APPLY_FOG(i.fogCoord, col);
return col;
}
ENDCG
}
}
}
The rotation code is now a separate function so we can reuse it. We also add a second UV set, a float4 instead of a float2, to the v2f struct, and use its xy and zw components to pack two UV sets into a single interpolator for efficiency.
Now what about doing the rotation in the fragment shader?
Shader "Unlit/Unlit UV Rotation of multiple textures in fragment"
{
Properties
{
_MainTex ("Texture", 2D) = "white" {}
_RotatedTexA ("Texture", 2D) = "white" {}
_RotationA ("Rotation", Range(0,360)) = 0.0
_RotatedTexB ("Texture", 2D) = "white" {}
_RotationB ("Rotation", Range(0,360)) = 0.0
}
SubShader
{
Tags { "RenderType"="Opaque" }
LOD 100
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
// make fog work
#pragma multi_compile_fog
#include "UnityCG.cginc"
float2 rotateUV(float2 uv, float degrees)
{
// rotating UV
const float Deg2Rad = (UNITY_PI * 2.0) / 360.0;
float rotationRadians = degrees * Deg2Rad; // convert degrees to radians
float s = sin(rotationRadians); // sin and cos take radians, not degrees
float c = cos(rotationRadians);
float2x2 rotationMatrix = float2x2( c, -s, s, c); // construct simple rotation matrix
uv -= 0.5; // offset UV so we rotate around 0.5 and not 0.0
uv = mul(rotationMatrix, uv); // apply rotation matrix
uv += 0.5; // offset UV again so UVs are in the correct location
return uv;
}
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float2 uv : TEXCOORD0;
UNITY_FOG_COORDS(1)
float4 vertex : SV_POSITION;
};
sampler2D _MainTex;
float4 _MainTex_ST;
sampler2D _RotatedTexA;
float4 _RotatedTexA_ST;
float _RotationA;
sampler2D _RotatedTexB;
float4 _RotatedTexB_ST;
float _RotationB;
v2f vert (appdata v)
{
v2f o;
o.vertex = mul(UNITY_MATRIX_MVP, v.vertex);
o.uv = v.uv;
UNITY_TRANSFER_FOG(o,o.vertex);
return o;
}
fixed4 frag (v2f i) : SV_Target
{
// sample the texture
float2 mainTex_uv = TRANSFORM_TEX(i.uv, _MainTex);
fixed4 col = tex2D(_MainTex, mainTex_uv);
// sample rotated textures
float2 uvA = TRANSFORM_TEX(rotateUV(i.uv, _RotationA), _RotatedTexA);
float2 uvB = TRANSFORM_TEX(rotateUV(i.uv, _RotationB), _RotatedTexB);
fixed4 colA = tex2D(_RotatedTexA, uvA);
fixed4 colB = tex2D(_RotatedTexB, uvB);
// adding the textures together just so you can see them all
col = (col + colA + colB) / 3.0;
// apply fog
UNITY_APPLY_FOG(i.fogCoord, col);
return col;
}
ENDCG
}
}
}
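The same rotation can also be done in a surface shader: the rotation runs in a custom vertex function and the rotated UV is passed through the Input struct, as in the following variant.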
Shader "Custom/Surface UV Rotation in vertex" {
Properties {
_Color ("Color", Color) = (1,1,1,1)
_MainTex ("Albedo (RGB)", 2D) = "white" {}
_Glossiness ("Smoothness", Range(0,1)) = 0.5
_Metallic ("Metallic", Range(0,1)) = 0.0
[NoScaleOffset] _RotatedTex ("Texture", 2D) = "white" {}
_Rotation ("Rotation", Range(0,360)) = 0.0
}
SubShader {
Tags { "RenderType"="Opaque" }
LOD 200
CGPROGRAM
// Physically based Standard lighting model, and enable shadows on all light types
#pragma surface surf Standard fullforwardshadows vertex:vert
// Use shader model 3.0 target, to get nicer looking lighting
#pragma target 3.0
float2 rotateUV(float2 uv, float degrees)
{
// rotating UV
const float Deg2Rad = (UNITY_PI * 2.0) / 360.0;
float rotationRadians = degrees * Deg2Rad; // convert degrees to radians
float s = sin(rotationRadians); // sin and cos take radians, not degrees
float c = cos(rotationRadians);
float2x2 rotationMatrix = float2x2( c, -s, s, c); // construct simple rotation matrix
uv -= 0.5; // offset UV so we rotate around 0.5 and not 0.0
uv = mul(rotationMatrix, uv); // apply rotation matrix
uv += 0.5; // offset UV again so UVs are in the correct location
return uv;
}
sampler2D _MainTex;
sampler2D _RotatedTex;
struct Input {
float2 uv_MainTex;
float2 rotatedUV;
};
half _Glossiness;
half _Metallic;
fixed4 _Color;
float _Rotation;
void vert (inout appdata_full v, out Input o) {
UNITY_INITIALIZE_OUTPUT(Input,o);
o.rotatedUV = rotateUV(v.texcoord.xy, _Rotation); // .xy: rotateUV expects a float2
}
void surf (Input IN, inout SurfaceOutputStandard o) {
// Albedo comes from a texture tinted by color
fixed4 c = tex2D (_MainTex, IN.uv_MainTex) * _Color;
// rotated texture
fixed4 c2 = tex2D(_RotatedTex, IN.rotatedUV);
// blend the two together so we can see them
c = (c + c2) / 2.0;
o.Albedo = c.rgb;
// Metallic and smoothness come from slider variables
o.Metallic = _Metallic;
o.Smoothness = _Glossiness;
o.Alpha = c.a;
}
ENDCG
}
FallBack "Diffuse"
}
I also disabled the in-editor texture scaling and offset for the rotated texture, just because that adds another layer of weirdness in surface shaders.
http://blog.sina.com.cn/s/blog_697b1b8c0101eg9f.html
Sometimes a whole batch of scene objects needs to be turned into prefabs, but Unity only lets you create them one at a time by hand, which feels clumsy. The editor-class method below takes care of that.
static Object CreatePrefab(GameObject go, string name)
{
    // First create an empty prefab asset.
    // The path where the prefab is saved in the project can be changed ("Assets/" + name + ".prefab").
    Object tempPrefab = EditorUtility.CreateEmptyPrefab("Assets/" + name + ".prefab");
    // Then replace the empty prefab with our scene object.
    tempPrefab = EditorUtility.ReplacePrefab(go, tempPrefab);
    // Return the created prefab.
    return tempPrefab;
}
This method can follow any rule you like; for example, you can iterate over all children of an object and turn every one of them into a prefab saved in your project:
[MenuItem("Tools/BatchPrefab All Children")]
public static void BatchPrefab()
{
    Transform tParent = ((GameObject)Selection.activeObject).transform;

    Object tempPrefab;
    int i = 0;
    foreach (Transform t in tParent)
    {
        tempPrefab = EditorUtility.CreateEmptyPrefab("Assets/Prefab/prefab" + i + ".prefab");
        tempPrefab = EditorUtility.ReplacePrefab(t.gameObject, tempPrefab);
        i++;
    }
}
The code above adds a Tools > BatchPrefab All Children menu item to Unity (batch-create prefabs for all children): it takes the object currently selected in the scene, iterates over all of its children, and saves a prefab for each child under the project's Assets/Prefab directory.
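On newer Unity versions (2018.3 and later) EditorUtility.CreateEmptyPrefab/ReplacePrefab are deprecated; a rough equivalent of the batch tool using PrefabUtility.SaveAsPrefabAsset might look like this (a sketch, same paths as above):

using UnityEngine;
using UnityEditor;

public static class BatchPrefabModern
{
    [MenuItem("Tools/BatchPrefab All Children (PrefabUtility)")]
    public static void BatchPrefab()
    {
        Transform tParent = ((GameObject)Selection.activeObject).transform;
        int i = 0;
        foreach (Transform t in tParent)
        {
            // SaveAsPrefabAsset creates or overwrites the prefab asset in a single call
            PrefabUtility.SaveAsPrefabAsset(t.gameObject, "Assets/Prefab/prefab" + i + ".prefab");
            i++;
        }
    }
}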
Shader "Custom/test" {
Properties {
_MainTex ("Albedo (RGB)", 2D) = "green" {}
_Test ("test", Float) = 2.2
_Color ("C", Color) = (1.0, 1.0,1.0,1.0)
}
SubShader
{
Tags { "RenderType"="Opaque" }
Pass
{
Name "FORWARD"
Tags { "LightMode" = "ForwardBase" }
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
#include "Lighting.cginc"
struct VS_OUTPUT
{
float4 pos : SV_POSITION;
float2 uv : TEXCOORD0;
};
uniform float _Test;
float4 Gamma2Linear(float4 c)
{
return pow(c, _Test);
}
float4 Linear2Gamma(float4 c)
{
return pow(c, 1.0 / _Test);
}
VS_OUTPUT vert(appdata_tan i)
{
VS_OUTPUT o;
o.pos = mul(UNITY_MATRIX_MVP, i.vertex);
o.uv = i.texcoord.xy;
return o;
}
uniform sampler2D _MainTex;
float4 frag(VS_OUTPUT i): COLOR
{
float4 c = float4(i.uv.x, i.uv.x, i.uv.x, 1.0);
return Linear2Gamma(c);
}
ENDCG
}
}
FallBack "Diffuse"
}
https://forum.unity3d.com/threads/about-gamma-correction.353987/
https://forum.unity3d.com/threads/problem-using-linear-space.253622/
http://lolengine.net/blog/2013/07/27/rgb-to-hsv-in-glsl
https://community.unity.com/t5/Shaders/Saturation-Shader/td-p/1520622
http://www.clonefactor.com/wordpress/program/unity3d/1513/
https://forum.unity3d.com/threads/hue-saturation-brightness-contrast-shader.260649/
1. To tell whether a target is in front of or behind you, use:
Vector3.Dot(transform.forward, target.position - transform.position)
When the return value is positive the target is in front of you; when negative it is behind you. (Note the subtraction: the dot product must be taken with the direction to the target, not with target.position itself.)
2. To tell whether the target is to your left or right, use:
Vector3.Cross(transform.forward, target.position - transform.position).y
When the return value is positive the target is to your right; when negative it is to your left.
3. While we are at it, a quick recap of the dot and cross products of spatial vectors:
A. Dot product
The dot product is computed as a·b = |a||b|·cos θ, where |a| and |b| are the magnitudes of the vectors and θ is the angle between them. In a dot product the order of a and b (and hence the sign of the angle) does not matter.
So the dot product actually lets us compute the angle between two vectors.
It also gives a simple, rough test of whether the current object faces another one: just take the dot product of the object's transform.forward with the direction to otherObj.transform.position; greater than 0 means the other object is in front, otherwise it is behind.
B. Cross product
The cross product is defined as c = a × b, where a, b and c are all vectors: the cross product of two vectors is again a vector!
Property 1: c ⊥ a and c ⊥ b, i.e. c is perpendicular to the plane containing a and b.
Property 2: its magnitude is |c| = |a||b|·sin θ.
Property 3: it follows the right-hand rule (in Unity's left-handed coordinate system, Vector3.Cross follows the left-hand rule instead, but the sign test works the same way). Hence a × b ≠ b × a, and in fact a × b = −(b × a). We can therefore use the sign of the cross product to judge the relative position of vectors a and b, i.e. whether b lies clockwise or counterclockwise of a.
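A minimal sketch combining both checks (the target field and log format are illustrative):

using UnityEngine;

public class RelativePosition : MonoBehaviour
{
    public Transform target;

    void Update()
    {
        // always measure against the direction to the target, not its raw position
        Vector3 toTarget = target.position - transform.position;

        float front = Vector3.Dot(transform.forward, toTarget);    // > 0: in front, < 0: behind
        float side = Vector3.Cross(transform.forward, toTarget).y; // > 0: to the right, < 0: to the left

        Debug.Log((front > 0 ? "front" : "back") + " / " + (side > 0 ? "right" : "left"));
    }
}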
Zooming the camera with the mouse scroll wheel:
if (Input.GetAxis("Mouse ScrollWheel") != 0)
{
    // move the camera along its forward axis by the scroll delta
    cam.transform.position += cam.transform.forward * Input.GetAxis("Mouse ScrollWheel");
}
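Wrapped into a complete script (a sketch; the cam field and zoomSpeed factor are additions for illustration):

using UnityEngine;

public class ScrollZoom : MonoBehaviour
{
    public Camera cam;           // camera to move
    public float zoomSpeed = 5f;

    void Update()
    {
        float scroll = Input.GetAxis("Mouse ScrollWheel");
        if (scroll != 0f)
        {
            // positive scroll moves the camera forward (zoom in), negative backward
            cam.transform.position += cam.transform.forward * scroll * zoomSpeed;
        }
    }
}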
You can use GPU instancing to draw many identical objects with only a few draw calls. There are some restrictions that you need to bear in mind (see the notes on batching priority further below).
A Standard Surface Shader that supports instancing is available in the Unity Editor. Add one to your project by selecting Shader > Standard Surface Shader (Instanced).
[Figure: Adding the Standard Instanced Shader]
Apply this Shader to your GameObject's Material. In your Material's Inspector window, click the Shader drop-down, roll over the Instanced field, and choose your instanced Shader from the list:
[Figure: Assigning the Standard Instanced Shader to a Material]
Even though the instanced GameObjects are sharing the same Mesh and Material, you can set Shader properties on a per-object basis using the MaterialPropertyBlock API. In the example below, each GameObject is assigned a random color value using the _Color property:
MaterialPropertyBlock props = new MaterialPropertyBlock();
MeshRenderer renderer;
foreach (GameObject obj in objects) // "objects" is your collection of instanced GameObjects
{
    float r = Random.Range(0.0f, 1.0f);
    float g = Random.Range(0.0f, 1.0f);
    float b = Random.Range(0.0f, 1.0f);
    props.SetColor("_Color", new Color(r, g, b));
    renderer = obj.GetComponent<MeshRenderer>();
    renderer.SetPropertyBlock(props);
}
The following example takes a simple unlit Shader and makes it capable of instancing:
Shader "SimplestInstancedShader"
{
Properties
{
_Color ("Color", Color) = (1, 1, 1, 1)
}
SubShader
{
Tags { "RenderType"="Opaque" }
LOD 100
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma multi_compile_instancing
#include "UnityCG.cginc"
struct appdata
{
float4 vertex : POSITION;
UNITY_INSTANCE_ID
};
struct v2f
{
float4 vertex : SV_POSITION;
UNITY_INSTANCE_ID
};
UNITY_INSTANCING_CBUFFER_START (MyProperties)
UNITY_DEFINE_INSTANCED_PROP (float4, _Color)
UNITY_INSTANCING_CBUFFER_END
v2f vert (appdata v)
{
v2f o;
UNITY_SETUP_INSTANCE_ID (v);
UNITY_TRANSFER_INSTANCE_ID (v, o);
o.vertex = UnityObjectToClipPos (v.vertex);
return o;
}
fixed4 frag (v2f i) : SV_Target
{
UNITY_SETUP_INSTANCE_ID (i);
return UNITY_ACCESS_INSTANCED_PROP (_Color);
}
ENDCG
}
}
}
Addition | Function |
---|---|
#pragma multi_compile_instancing | multi_compile_instancing generates a Shader with two variants: one with the built-in keyword INSTANCING_ON defined (allowing instancing), the other with nothing defined. This allows the Shader to fall back to a non-instanced version if instancing isn't supported on the GPU. |
UNITY_INSTANCE_ID | This is used in the vertex Shader input/output structure to define an instance ID. See SV_InstanceID for more information. |
UNITY_INSTANCING_CBUFFER_START(name) / UNITY_INSTANCING_CBUFFER_END | Every per-instance property must be defined in a specially named constant buffer. Use this pair of macros to wrap the properties you want to be made unique to each instance. |
UNITY_DEFINE_INSTANCED_PROP(float4, _Color) | This defines a per-instance Shader property with a type and a name. In this example, the _Color property is unique per instance. |
UNITY_SETUP_INSTANCE_ID(v); | This makes the instance ID accessible to Shader functions. It must be used at the very beginning of a vertex Shader, and is optional for fragment Shaders. |
UNITY_TRANSFER_INSTANCE_ID(v, o); | This copies the instance ID from the input structure to the output structure in the vertex Shader. This is only necessary if you need to access per-instance data in the fragment Shader. |
UNITY_ACCESS_INSTANCED_PROP(_Color) | This accesses a per-instance Shader property. It uses an instance ID to index into the instance data array. |
Note: As long as Material properties are instanced, Renderers can always be rendered instanced, even if you put different instanced properties into different Renderers. Normal (non-instanced) properties cannot be batched, so do not put them in the MaterialPropertyBlock. Instead, create different Materials for them.
UnityObjectToClipPos(v.vertex) is always preferred where mul(UNITY_MATRIX_MVP, v.vertex) would otherwise be used. While you can continue to use UNITY_MATRIX_MVP as normal in instanced Shaders, UnityObjectToClipPos is the most efficient way of transforming vertex positions from object space into clip space.
In instanced Shaders, UNITY_MATRIX_MVP (among other built-in matrices) is transparently modified to include an extra matrix multiply. Specifically, it is expanded to mul(UNITY_MATRIX_VP, unity_ObjectToWorld), where unity_ObjectToWorld is expanded to unity_ObjectToWorldArray[unity_InstanceID].
UnityObjectToClipPos is optimized to perform two matrix-vector multiplications simultaneously, and is therefore more efficient than performing the multiplication manually, because the Shader compiler does not automatically perform this optimization.
For vertex and fragment Shaders, Unity needs to change the way vertex transformations are calculated in multi-pass scenarios (for example, in the ForwardAdd pass) to avoid z-fighting artifacts against the base/first passes due to floating point errors in matrix calculation. To do this, add #pragma force_concat_matrix to the Shader.
Specifically, the vertex transformation in the ForwardAdd pass is then calculated by multiplying the M (model) matrix with the VP (view and projection) matrix, instead of using a CPU-precomputed MVP matrix.
This is not necessary for surface Shaders, because the correct calculation is automatically substituted.
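As a sketch, the pragma sits alongside the other compile directives inside CGPROGRAM (the surrounding pragmas are taken from the instanced example above):

CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma multi_compile_instancing
#pragma force_concat_matrix // compute mul(VP, M) per vertex instead of using a CPU-precomputed MVP
// ...
ENDCG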
Static batching takes priority over instancing. If a GameObject is marked for static batching and is successfully batched, instancing is disabled even if its Renderer uses an instancing Shader. When this happens, a warning box appears in the Inspector suggesting that the Static Batching flag be unchecked in the Player Settings.
Instancing takes priority over dynamic batching. If Meshes can be instanced, dynamic batching is disabled.
Use the addshadow option to force the generation of an instanced shadow pass.
Defining UNITY_MAX_INSTANCE_COUNT with an integer before including any .cginc file allows you to limit the maximum number of instances an instanced draw call can draw. This allows for more properties per instance in the instance constant buffer. You can achieve the same result in a surface Shader with #pragma instancing_options maxcount:number. The default value of this max instance count is 500. For OpenGL, the actual value is one quarter of the value you specify, so 125 by default.
https://docs.unity3d.com/Manual/SL-TextureArrays.html
Similar to regular 2D textures (Texture2D class, sampler2D in shaders), cube maps (Cubemap class, samplerCUBE in shaders), and 3D textures (Texture3D class, sampler3D in shaders), Unity also supports 2D texture arrays.
A texture array is a collection of same size/format/flags 2D textures that look like a single object to the GPU, and can be sampled in the shader with a texture element index. They are useful for implementing custom terrain rendering systems or other special effects where you need an efficient way of accessing many textures of the same size and format. Elements of a 2D texture array are also known as slices, or layers.
Texture arrays need to be supported by the underlying graphics API and the GPU; they are not available everywhere. Platforms such as Direct3D 9, OpenGL ES 2.0 and WebGL 1.0 do not support texture arrays. Use SystemInfo.supports2DArrayTextures to determine texture array support at runtime.
As there is no texture import pipeline for texture arrays, they must be created from within your scripts. Use the Texture2DArray class to create and manipulate them. Note that texture arrays can be serialized as assets, so it is possible to create and fill them with data from editor scripts.
Normally, texture arrays are used purely within GPU memory, but you can use Graphics.CopyTexture, Texture2DArray.GetPixels and Texture2DArray.SetPixels to transfer pixels to and from system memory.
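Since there is no import pipeline, a texture array is typically assembled in an editor script. A minimal sketch using Graphics.CopyTexture (the menu name and asset path are made up; all source textures must share the same size, format and mip count):

using UnityEngine;
using UnityEditor;

public static class CreateTextureArrayExample
{
    [MenuItem("Tools/Create Texture2DArray From Selected Textures")]
    static void Create()
    {
        Object[] textures = Selection.GetFiltered(typeof(Texture2D), SelectionMode.Assets);
        Texture2D first = (Texture2D)textures[0];
        Texture2DArray array = new Texture2DArray(
            first.width, first.height, textures.Length, first.format, true);
        for (int i = 0; i < textures.Length; i++)
        {
            // copies every mip level of the source texture into element i of the array
            Graphics.CopyTexture((Texture2D)textures[i], 0, array, i);
        }
        AssetDatabase.CreateAsset(array, "Assets/MyTextureArray.asset");
    }
}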
Texture array elements may also be used as render targets. Use RenderTexture.dimension to specify in advance whether the render target is to be a 2D texture array. The depthSlice argument to Graphics.SetRenderTarget specifies which array slice to render to. On platforms that support "layered rendering" (i.e. geometry shaders), you can set the depthSlice argument to -1 to set the whole texture array as a render target. You can also use a geometry shader to render into individual elements.
Since texture arrays do not work on all platforms, shaders need to use an appropriate compilation target to access them. The minimum shader model compilation target that supports texture arrays is 3.5.
Use these macros to declare and sample texture arrays: UNITY_DECLARE_TEX2DARRAY and UNITY_SAMPLE_TEX2DARRAY (both appear in the example below).
The following shader example samples a texture array using object space vertex positions as coordinates:
Shader "Example/Sample2DArrayTexture"
{
Properties
{
_MyArr ("Tex", 2DArray) = "" {}
_SliceRange ("Slices", Range(0,16)) = 6
_UVScale ("UVScale", Float) = 1.0
}
SubShader
{
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
// to use texture arrays we need to target DX10/OpenGLES3 which
// is shader model 3.5 minimum
#pragma target 3.5
#include "UnityCG.cginc"
struct v2f
{
float3 uv : TEXCOORD0;
float4 vertex : SV_POSITION;
};
float _SliceRange;
float _UVScale;
v2f vert (float4 vertex : POSITION)
{
v2f o;
o.vertex = mul(UNITY_MATRIX_MVP, vertex);
o.uv.xy = (vertex.xy + 0.5) * _UVScale;
o.uv.z = (vertex.z + 0.5) * _SliceRange;
return o;
}
UNITY_DECLARE_TEX2DARRAY(_MyArr);
half4 frag (v2f i) : SV_Target
{
return UNITY_SAMPLE_TEX2DARRAY(_MyArr, i.uv);
}
ENDCG
}
}
}
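To feed the shader, assign a Texture2DArray to its _MyArr property from script; a sketch (myTextureArray would be created elsewhere, e.g. by an editor script like the one above):

using UnityEngine;

public class AssignTextureArray : MonoBehaviour
{
    public Texture2DArray myTextureArray;

    void Start()
    {
        // _MyArr matches the property name declared in the shader above
        GetComponent<Renderer>().material.SetTexture("_MyArr", myTextureArray);
    }
}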
https://docs.unity3d.com/ScriptReference/MaterialProperty.PropFlags.html
Flags that control how a MaterialProperty is displayed.
Flag | Description |
---|---|
None | No flags are set. |
HideInInspector | Do not show the property in the inspector. |
PerRendererData | Texture value for this property will be queried from renderer's MaterialPropertyBlock, instead of from the material. This corresponds to the "[PerRendererData]" attribute in front of a property in the shader code. |
NoScaleOffset | Do not show UV scale/offset fields next to a texture. |
Normal | Signifies that values of this property contain Normal (normalized vector) data. |
HDR | Signifies that values of this property contain High Dynamic Range (HDR) data. |
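For illustration, here is how those flags map onto property attributes in a shader's Properties block (a sketch; the property names are made up):

Properties {
    [PerRendererData] _MainTex ("Texture", 2D) = "white" {}     // value queried from the MaterialPropertyBlock
    [HideInInspector] _Internal ("Internal", Color) = (1,1,1,1) // not shown in the inspector
    [NoScaleOffset] _Mask ("Mask", 2D) = "white" {}             // no UV scale/offset fields
    [HDR] _Emission ("Emission", Color) = (0,0,0,1)             // HDR color value
}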
https://docs.unity3d.com/ScriptReference/AssetDatabase.GetAssetPath.html
instanceID | The instance ID of the asset. |
assetObject | A reference to the asset. |
Returns: string — the asset path name, or null or an empty string if the asset does not exist.
The path is relative to the project folder where the asset is stored, for example: "Assets/MyTextures/hello.png".
using UnityEngine;
using UnityEditor;

public class CreateMaterialExample : MonoBehaviour
{
    [MenuItem("GameObject/Create Material")]
    static void CreateMaterial()
    {
        // Create a simple material asset
        Material material = new Material(Shader.Find("Specular"));
        AssetDatabase.CreateAsset(material, "Assets/MyMaterial.mat");
        // Print the path of the created asset
        Debug.Log(AssetDatabase.GetAssetPath(material));
    }
}
Debug.Log(AssetDatabase.GetAssetPath(GetComponent<Renderer>().material.GetTexture("_MainTex"))); // e.g. print the asset path of this renderer's main texture