Implementation overview:
First, prepare a shader for the screen effect. It outputs the depth of the models rendered on screen, which is then used to compute the world-space position of a model pixel.
Shader "Custom/Depth" {
SubShader{
Tags{ "RenderType" = "Opaque" }
Pass{
ZTest Always Cull Off ZWrite Off
CGPROGRAM
// Use shader model 3.0 target, to get nicer looking lighting
#pragma glsl
#pragma fragmentoption ARB_precision_hint_fastest
#pragma target 3.0
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
sampler2D _CameraDepthTexture;
struct v2f {
float4 pos : SV_POSITION;
float4 scrPos:TEXCOORD0;
float2 uv : TEXCOORD1;
};
//Vertex Shader
v2f vert(appdata_base v) {
v2f o;
o.pos = UnityObjectToClipPos(v.vertex);
o.scrPos = ComputeScreenPos(o.pos);
o.uv = v.texcoord;
return o;
}
//Fragment Shader
float4 frag(v2f i) :COLOR{
float depthValue = Linear01Depth(tex2Dproj(_CameraDepthTexture, UNITY_PROJ_COORD(i.scrPos)).r);
return float4(depthValue, depthValue, depthValue, 1.0f);
}
ENDCG
}
}
FallBack "Diffuse"
}
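Note that the shader samples _CameraDepthTexture. In the built-in render pipeline this texture is only populated when a camera's depthTextureMode includes the Depth flag (or when rendering deferred). The script below does not set this explicitly, so if the depth readback stays black, a likely fix (a sketch, not part of the original project) is to enable it on each hit camera right after it is created:
tempcam.depthTextureMode |= DepthTextureMode.Depth;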
Next, create a raycast camera that renders only the models that should participate in the ray test, and aim it along the ray from the mouse position. In OnRenderImage(), render with the shader above to read back the depth of a single pixel as an RGB value, then use that depth to compute the pixel's world-space position, which is the point on the model under the mouse. The script below also lets you pick two points on the model and measures the shortest distance between them along the surface.
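The reconstruction itself is one line of math: Linear01Depth() returns a linear depth in [0, 1] where 1 corresponds to the camera's far clip plane, so the point under the pixel is (a sketch using the placeholder names depth01 and hitCamera):
Vector3 hitPoint = hitCamera.transform.position + hitCamera.transform.forward * depth01 * hitCamera.farClipPlane;
The script below hard-codes the factor 1000, which assumes each hit camera keeps Unity's default far clip plane of 1000.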
using System;
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
public class RaycastTest : MonoBehaviour
{
private float degree = 0;
private Shader depthShader;
private RenderTexture hitTex;
private int hitWidth = 1;
private int hitHeight = 1;
private LayerMask meshLayerMask;
private Texture2D tex;
private Color c;
public Transform dot;
private bool OnMeasure;
private bool isEnd;
private Vector3 startPos;
private Vector3 endPos;
private Vector3 upVector;
private Vector3 forwardVector;
private List<Vector3> pointsPos = new List<Vector3>();
private List<Vector3> minPointsPos = new List<Vector3>();
private const int pointNum = 40;
public LineRenderer lineRender;
private List<Vector3> hitResult = new List<Vector3>();
private List<Camera> hitCam = new List<Camera>();
private int _hitCamNum = 1;
public Transform cameraContent;
public int hitCamNum
{
set
{
_hitCamNum = value;
for (int i = 0; i < hitCam.Count; i++)
{
Destroy(hitCam[i].gameObject);
}
hitCam.Clear();
hitResult.Clear();
for (int i = 0; i < value; ++i)
{
GameObject hitCamObj = new GameObject("HitCam" + i.ToString());
hitCamObj.transform.SetParent(cameraContent);
Camera tempcam = hitCamObj.AddComponent<Camera>();
tempcam.stereoTargetEye = StereoTargetEyeMask.None;
tempcam.enabled = false;
hitCam.Add(tempcam);
hitResult.Add(Vector3.zero);
}
for (int j = 0; j < hitCamNum; ++j)
{
hitCam[j].clearFlags = CameraClearFlags.SolidColor;
hitCam[j].backgroundColor = Color.black;
}
}
get
{
return _hitCamNum;
}
}
void Start()
{
hitCamNum = 1;
for (int j = 0; j < hitCamNum; ++j)
{
hitCam[j].clearFlags = CameraClearFlags.SolidColor;
hitCam[j].backgroundColor = Color.black;
}
meshLayerMask = LayerMask.GetMask("objMesh");
tex = new Texture2D(hitWidth, hitHeight, TextureFormat.RGBAFloat, false);
hitTex = RenderTexture.GetTemporary(hitWidth, hitHeight, 24, RenderTextureFormat.ARGBFloat);
depthShader = Shader.Find("Custom/Depth");
}
private void Update()
{
if (OnMeasure)
{
GetAllPoints();
}
else
{
dot.transform.position = RayCastBody();
if (Input.GetMouseButtonDown(0) && RayCastBody().magnitude > 0.0001f)
{
if (isEnd)
{
endPos = RayCastBody();
OnMeasure = true;
//get two mutually perpendicular vectors lying in the plane perpendicular to the start-to-end vector
upVector = Vector3.Cross(Vector3.up, startPos - endPos).normalized;
forwardVector = Vector3.Cross(upVector, startPos - endPos).normalized;
hitCamNum = pointNum;
isEnd = false;
}
else
{
startPos = RayCastBody();
isEnd = true;
}
}
}
}
private Vector3 RayCastBody()
{
Ray ray = Camera.main.ScreenPointToRay(Input.mousePosition);
hitCam[0].transform.position = ray.origin - ray.direction * 0.01f;
hitCam[0].transform.forward = ray.direction;
return hitResult[0];
}
private void OnRenderImage(RenderTexture source, RenderTexture destination)
{
for (int j = 0; j < hitCamNum; j++)
{
hitCam[j].targetTexture = hitTex;
hitCam[j].clearFlags = CameraClearFlags.SolidColor;
hitCam[j].backgroundColor = Color.black;
hitCam[j].cullingMask = meshLayerMask;//render only objects on the objMesh layer
hitCam[j].RenderWithShader(depthShader, "RenderType");//render the model depth with depthShader (shader replacement)
RenderTexture.active = hitTex;
tex.ReadPixels(new Rect(0, 0, hitWidth, hitHeight), 0, 0);
c = tex.GetPixels()[0];//read back the depth pixel: r = g = b = (distance from this camera to the point) / (camera far clip distance)
//if a pixel was hit, compute its world-space position
if (c.r > 0)
{
Vector3 temp = hitCam[j].transform.position + hitCam[j].transform.forward * c.r * 1000;//c.r is linear 0-1 depth; the factor 1000 assumes the default far clip plane
hitResult[j] = temp;
}
else
{
hitResult[j] = Vector3.zero;
}
}
Graphics.Blit(source, destination);
}
private void GetAllPoints()
{
if (degree > 360)
{
degree = 0;
currentLength = Mathf.Infinity;
OnMeasure = false;
return;
}
//using the segment from the start point to the end point as a baseline, project it onto the surface; sweep the projection plane through a full circle and the minimum length found is the shortest distance
Vector3 v = upVector * Mathf.Cos(Mathf.Deg2Rad * degree) + forwardVector * Mathf.Sin(Mathf.Deg2Rad * degree);
v.Scale(Vector3.one * 10);
degree += 5;
pointsPos.Clear();
//pull the sampled points slightly outward so the generated curve does not coincide with the model surface and stays attached to its outside
pointsPos.Add(startPos + v.normalized * 0.0005f);
for (int i = 1; i < pointNum - 1; i++)
{
Vector3 temp = startPos + (endPos - startPos) * i / pointNum;
hitCam[i - 1].transform.position = temp + v.normalized * 1.5f;
hitCam[i - 1].transform.forward = -v.normalized;
pointsPos.Add(hitResult[i - 1] + v.normalized * 0.0005f);
}
pointsPos.Add(endPos + v.normalized * 0.0005f);
GetMinLine();
}
float currentLength = Mathf.Infinity;
/// <summary>
/// Gets the shortest curve length between the two points along the surface.
/// </summary>
private void GetMinLine()
{
float sum = 0;
for (int i = 0; i < pointNum - 1; i++)
{
sum += Vector3.Distance(pointsPos[i], pointsPos[i + 1]);
}
if (sum < currentLength)
{
currentLength = sum;
minPointsPos = pointsPos;
Drawline(minPointsPos);
}
}
void Drawline(List<Vector3> pois)
{
for (int i = 0; i < pois.Count; i++)
{
lineRender.SetPosition(i, pois[i]);
}
}
}
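One setup detail the script leaves implicit: Drawline() writes pointNum positions into the LineRenderer, so lineRender.positionCount must be at least pointNum before the measurement runs, otherwise SetPosition() throws an out-of-range error. A minimal sketch (an addition, not in the original script) is to size it once in Start():
lineRender.positionCount = pointNum;
Also make sure the measured model is placed on the "objMesh" layer, since the hit cameras cull everything else.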