Based on my experiments, this approach has given the best result I have been able to achieve so far.
- Principle:
  - At the clicked positions, draw the shape with a LineRenderer; from all of its points compute the minimum and maximum X and Y coordinates, which yields a bounding rectangle.
  - Take the longer side of that rectangle and use half of it as the orthographicSize of the orthographic camera and of the Projector (orthographicSize is the half-height of the view). Placed correctly, the camera and the projector then cover the drawn red line exactly.
  - Have the orthographic camera take one shot and use the result as the projector's texture (a higher RenderTexture resolution gives a better result). The projector projects it straight down, and that's it. (How the texture is handed to the projector is sketched right after this list.)
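The texture-to-projector step is not shown in the partial code below. Here is a minimal sketch of it, assuming the projector uses Unity's built-in "Projector/Multiply" shader (whose cookie texture property is "_ShadowTex") and points straight down; the helper name is mine, not from the original code:

// Hypothetical helper: hand the captured texture to the projector and aim it downward.
private void ApplyTextureToProjector(Projector proj, Texture2D tex)
{
    proj.transform.rotation = Quaternion.Euler(90f, 0f, 0f); // look straight down
    proj.material.SetTexture("_ShadowTex", tex);             // cookie slot of Projector/Multiply (assumption)
}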
- Partial code:
using SGF.Unity.Common;
using System;
using System.Collections.Generic;
using UnityEngine;

public enum DrawMode
{
    NULL,
    POLYGON,
    ELLIPTICAL,
}
public class TextureGenerator : MonoSingleton<TextureGenerator>
{
    private float _minX;
    private float _maxX;
    private float _minY;
    private float _maxY;
    private float _middleX;
    private float _middleY;
    private int _textureWidth;
    private int _textureHeight;

    public Color LineColor = Color.red;                      // as a singleton, provides the color that ProjController uses to draw the LineRenderer
    private Color _backgroundColor = new Color(1, 1, 1, 0);  // transparent white

    [Header("Resolution multiplier of the generated texture")]
    [Range(1, 20)]
    public int TexResolutionMult = 10;

    [Header("Projector height above the ground -- must be higher than the highest ground position")]
    public float ProjectorHeight = 200;

    public Vector3 GetProjectorPos()
    {
        return new Vector3(_middleX, ProjectorHeight, _middleY); // _middleY holds the centre of the Z range
    }
    private void DataRelease()
    {
        _minX = 0;
        _maxX = 0;
        _minY = 0;
        _maxY = 0;
        _middleX = 0;
        _middleY = 0;
        _textureWidth = 0;
        _textureHeight = 0;
    }
    /// <summary>
    /// Generates a texture by photographing the LineRenderer with the camera.
    /// </summary>
    /// <param name="camera">Orthographic camera that photographs the line.</param>
    /// <param name="proj">Projector that receives the same orthographic size.</param>
    /// <param name="type">Draw mode (not used in this capture path).</param>
    /// <returns>The captured texture.</returns>
    public Texture2D GenerateProceduralTexture(Camera camera, Projector proj, DrawMode type = DrawMode.POLYGON)
    {
        //CalculateTextureWH(posList); // already calculated beforehand
        int longSide = _textureWidth > _textureHeight ? _textureWidth : _textureHeight;
        float orthoSize = longSide / 2.0f;
        camera.orthographicSize = orthoSize;
        proj.orthographicSize = orthoSize;

        longSide *= TexResolutionMult; // apply the resolution multiplier
        RenderTexture rt = camera.targetTexture;
        if (rt == null)
        {
            rt = new RenderTexture(longSide, longSide, 0);
            camera.targetTexture = rt;
        }
        else
        {
            rt.Release();
            rt.width = longSide;
            rt.height = longSide;
        }

        Texture2D image = new Texture2D(longSide, longSide);
        // The Render Texture in RenderTexture.active is the one
        // that will be read by ReadPixels.
        RenderTexture.active = rt; // make the camera's render texture the active one
        // Render the camera's view.
        camera.Render();
        // Read the active Render Texture into the new texture.
        //image.ReadPixels(new Rect(0, 0, camera.targetTexture.width, camera.targetTexture.height), 0, 0);
        image.ReadPixels(new Rect(0, 0, longSide, longSide), 0, 0);
        image.Apply();
        RenderTexture.active = null; // release the active render texture again
        return image;
    }
    #region ===== Procedurally generated texture (approach superseded) =====
    /// <summary>
    /// Generates a texture directly from the computed points.
    /// </summary>
    /// <param name="posList">World-space points of the line.</param>
    /// <param name="closePolygon">Whether to close the polygon.</param>
    /// <param name="type">Draw mode.</param>
    /// <returns>The generated texture.</returns>
    //public Texture2D GenerateProceduralTexture(List<Vector3> posList, bool closePolygon = false, DrawMode type = DrawMode.POLYGON)
    //{
    //    CalculateTextureWH(posList);
    //    Debug.Log(_textureWidth + ", " + _textureHeight);
    //    Texture2D proceduralTexture = new Texture2D(_textureWidth, _textureHeight);
    //    for (int w = 0; w < _textureWidth; w++)
    //    {
    //        for (int h = 0; h < _textureHeight; h++)
    //        {
    //            Color pixel = _backgroundColor;
    //            // Check this pixel's distance to every edge; stop as soon as one edge qualifies.
    //            for (int i = 0; i < posList.Count; i++)
    //            {
    //                int next = i + 1;
    //                if (next == posList.Count) next = 0;
    //                Vector3 vstart = WorldToTexture(posList[i]);
    //                Vector3 vend = WorldToTexture(posList[next]);
    //                Vector3 vcurrent = new Vector3(w, 0, h);
    //                if (PointInRect(vstart, vend, vcurrent, LineWidth))
    //                {
    //                    if (PointToStraightlineDistance(vstart, vend, vcurrent) <= LineWidth)
    //                    {
    //                        pixel = LineColor;
    //                        break;
    //                    }
    //                }
    //            }
    //            proceduralTexture.SetPixel(w, h, pixel);
    //        }
    //    }
    //    proceduralTexture.Apply();
    //    return proceduralTexture;
    //}
    #endregion
    // Calculates the width and height of the texture.
    public void CalculateTextureWH(List<Vector3> posList)
    {
        DataRelease();
        for (int i = 0; i < posList.Count; i++)
        {
            float curX = posList[i].x;
            float curY = posList[i].z;
            if (i == 0)
            {
                _minX = _maxX = curX;
                _minY = _maxY = curY;
            }
            if (curX < _minX) _minX = curX;
            if (curX >= _maxX) _maxX = curX;
            if (curY < _minY) _minY = curY;
            if (curY >= _maxY) _maxY = curY;
        }
        _middleX = (_minX + _maxX) / 2;
        _middleY = (_minY + _maxY) / 2;
        _textureWidth = (int)Math.Ceiling(_maxX - _minX);
        _textureHeight = (int)Math.Ceiling(_maxY - _minY);
    }

    // Converts a point from world space to texture space.
    private Vector3 WorldToTexture(Vector3 originData)
    {
        Vector3 transData = new Vector3(originData.x - _minX, 0, originData.z - _minY);
        return transData;
    }
    /// <summary>
    /// Distance from a point to a straight line.
    /// </summary>
    /// <returns>The distance of the point to the line.</returns>
    /// <param name="lineStartPoint">Start point.</param>
    /// <param name="lineEndPoint">End point.</param>
    /// <param name="targetPoint">Point.</param>
    public float PointToStraightlineDistance(Vector3 lineStartPoint, Vector3 lineEndPoint, Vector3 targetPoint)
    {
        // Work in 2D: project everything onto the XZ plane first.
        Vector2 startVe2 = IgnoreYAxis(lineStartPoint);
        Vector2 endVe2 = IgnoreYAxis(lineEndPoint);
        // Line through the two points in general form Ax + By + C = 0;
        // the distance from (px, py) is |A*px + B*py + C| / sqrt(A^2 + B^2).
        float A = endVe2.y - startVe2.y;
        float B = startVe2.x - endVe2.x;
        float C = endVe2.x * startVe2.y - startVe2.x * endVe2.y;
        float denominator = Mathf.Sqrt(A * A + B * B);
        Vector2 pointVe2 = IgnoreYAxis(targetPoint);
        return Mathf.Abs((A * pointVe2.x + B * pointVe2.y + C) / denominator);
    }
    /// <summary>
    /// Only considers points inside the rectangle spanned by the start and end points
    /// (optionally expanded by the line width); points outside that rectangle are ignored.
    /// </summary>
    /// <param name="lineStartPoint">Start point of the segment.</param>
    /// <param name="lineEndPoint">End point of the segment.</param>
    /// <param name="targetPoint">Point to test.</param>
    /// <param name="linewidth">Line width used to expand the rectangle (expansion currently disabled).</param>
    private bool PointInRect(Vector3 lineStartPoint, Vector3 lineEndPoint, Vector3 targetPoint, float linewidth)
    {
        // The points passed in lie on the XZ plane (Y is always 0), so compare X and Z here.
        float minx, maxx, minz, maxz;
        if (lineStartPoint.x < lineEndPoint.x)
        {
            minx = lineStartPoint.x;
            maxx = lineEndPoint.x;
        }
        else
        {
            minx = lineEndPoint.x;
            maxx = lineStartPoint.x;
        }
        if (lineStartPoint.z < lineEndPoint.z)
        {
            minz = lineStartPoint.z;
            maxz = lineEndPoint.z;
        }
        else
        {
            minz = lineEndPoint.z;
            maxz = lineStartPoint.z;
        }
        //minx -= linewidth;
        //maxx += linewidth;
        //minz -= linewidth;
        //maxz += linewidth;
        if (targetPoint.x <= maxx && targetPoint.x >= minx)
        {
            if (targetPoint.z <= maxz && targetPoint.z >= minz)
            {
                return true;
            }
        }
        return false;
    }
    /// <summary>
    /// Drops the Y component of a 3D vector, projecting it onto the XZ plane.
    /// </summary>
    /// <param name="vector3">Vector to project.</param>
    /// <returns>The projected 2D vector.</returns>
    public static Vector2 IgnoreYAxis(Vector3 vector3)
    {
        return new Vector2(vector3.x, vector3.z);
    }
}
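For completeness, here is a usage sketch showing the intended call order. It is an illustration only: the caller class, the field names (DrawCamera, LineProjector, Line), the assumption that MonoSingleton exposes an Instance accessor, and the Projector/Multiply "_ShadowTex" assignment are all assumptions, not part of the original code.

using System.Collections.Generic;
using UnityEngine;

public class LineProjectionExample : MonoBehaviour
{
    public Camera DrawCamera;       // orthographic camera that photographs the line (assumed reference)
    public Projector LineProjector; // projector that casts the captured texture onto the ground (assumed reference)
    public LineRenderer Line;       // the red line drawn at the clicked points (assumed reference)

    public void ProjectLine()
    {
        // 1. Collect the world-space points of the drawn line.
        var points = new List<Vector3>(Line.positionCount);
        for (int i = 0; i < Line.positionCount; i++)
            points.Add(Line.GetPosition(i));

        var gen = TextureGenerator.Instance; // assumes MonoSingleton exposes an Instance accessor

        // 2. Compute the bounding rectangle of the points.
        gen.CalculateTextureWH(points);

        // 3. Put camera and projector above the rectangle centre, looking straight down.
        Vector3 pos = gen.GetProjectorPos();
        DrawCamera.orthographic = true;
        DrawCamera.transform.SetPositionAndRotation(pos, Quaternion.Euler(90f, 0f, 0f));
        LineProjector.transform.SetPositionAndRotation(pos, Quaternion.Euler(90f, 0f, 0f));

        // 4. Photograph the line and feed the result to the projector.
        Texture2D tex = gen.GenerateProceduralTexture(DrawCamera, LineProjector);
        LineProjector.material.SetTexture("_ShadowTex", tex); // same Projector/Multiply assumption as the earlier sketch
    }
}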