This was a fairly simple requirement, but I ran into a few pitfalls along the way, so I'm writing them down.
A room contains multiple models, and each model may have several SubMeshes. On a click we need to determine exactly which SubMesh was hit, draw an edge highlight on that SubMesh only, and allow the Material of that single SubMesh to be changed. For example, a table Mesh may actually have two materials corresponding to two SubMeshes, the tabletop and the legs; clicking the tabletop should highlight only the tabletop, not the whole table, and should let us swap the tabletop's Material on its own.
In Unity, as in Ogre, a Mesh can contain multiple SubMeshes, each with its own Material. The difference is that in Ogre every SubMesh can have its own vertex data, whereas in Unity all SubMeshes of a Mesh share the same vertex buffer and differ only in their index buffers. On an earlier project I combined Ogre's Renderable and MovableObject to build exactly this kind of layout and displayed every model that way instead of using Ogre's own Entity; I found it easier to understand and more convenient to work with.
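To make the vertex sharing concrete, here is a minimal sketch of how a single SubMesh can be pulled out into its own Mesh: copy the shared vertex data once and set only that SubMesh's index buffer. This is essentially what the full script further below does when it splits a SubMesh out; the helper name here is just for illustration.

using UnityEngine;

// Minimal sketch: build a standalone Mesh from one SubMesh of a source Mesh.
// All SubMeshes share the source's vertex buffer, so the vertices are copied once
// and only the index buffer of the chosen SubMesh is assigned.
public static class SubMeshUtil
{
    public static Mesh ExtractSubMesh(Mesh source, int subMeshIndex)
    {
        var mesh = new Mesh();
        mesh.vertices = source.vertices;   // shared vertex data
        mesh.normals  = source.normals;
        mesh.uv       = source.uv;
        // Only this SubMesh's indices, written as SubMesh 0 of the new Mesh.
        mesh.SetIndices(source.GetIndices(subMeshIndex),
                        source.GetTopology(subMeshIndex), 0);
        return mesh;
    }
}
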
The script below is the concrete implementation of this feature; it splits the Mesh apart, checks which SubMesh the ray actually hit, and so on.

using UnityEngine;
using System.Collections;
using System.Collections.Generic;
using System;
using UnityEngine.EventSystems;

public class RayCheck : MonoBehaviour
{
    //public MaterialUI ui;
    public NewUIControl ui;
    private Collider preCollider;
    private int cIndex = -1;
    private Material lineMat = null;
    private Material selectMat = null;
    //private Material showMat = null;
    private MeshFilter meshFilter = null;
    private MeshRenderer meshRender = null;
    private MeshCollider meshCollider = null;
    private BoxCollider boxCollider = null;
    private new Transform transform = null;
    private LineRenderer lineRender = null;
    private HighlightableObject hightLight = null;
    private float vminDist = 1.0f;
    private List<int> indexLay = new List<int>();
    private bool bPreObject = false;
    private int preIndex = 0;
    private Mesh mesh = null;
    // If true, every model can switch textures
    public bool defaultAdd = true;
    private RaycastHit preHit = new RaycastHit();
    private bool bHold = false;
    private Vector3 oldLocation = Vector3.zero;
    private Dictionary<string, List<string>> matTextures = new Dictionary<string, List<string>>();

    void Start()
    {
        lineMat = Resources.Load<Material>("LineMat");
        //showMat = Resources.Load<Material>("ShowMat");
        meshFilter = checkDefault<MeshFilter>();
        meshCollider = checkDefault<MeshCollider>();
        meshRender = checkDefault<MeshRenderer>();
        boxCollider = checkDefault<BoxCollider>();
        transform = checkDefault<Transform>();
        lineRender = checkDefault<LineRenderer>();
        hightLight = checkDefault<HighlightableObject>();
        lineRender.useWorldSpace = false;
        lineRender.sharedMaterial = lineMat;
        lineRender.SetWidth(0.01f, 0.01f);
        //meshRender.sharedMaterial = showMat;
        // Put this object on its own layer so the main camera skips it during normal
        // rendering; we only need it rendered for the highlight pass.
        this.gameObject.layer = 4;
        mesh = new Mesh();
        StartCoroutine(XmlReader.GetXML("materialtextures.xml", matTextures, XmlReader.ParseMatXml, this.InitUiMatTextures));
        //ui.onApply = ApplyMaterial;
    }

    public void InitUiMatTextures()
    {
        ui.SetMatTextures(matTextures);
    }

    T checkDefault<T>() where T : Component
    {
        T t = this.gameObject.GetComponent<T>();
        if (t == null)
        {
            t = this.gameObject.AddComponent<T>();
        }
        return t;
    }

    // Update is called once per frame
    void Update()
    {
        //!ui.isActiveAndEnabled &&
#if UNITY_EDITOR
        if (Input.GetMouseButtonDown(0) && !EventSystem.current.IsPointerOverGameObject())
#elif UNITY_ANDROID || UNITY_IPHONE
        if (Input.touchCount > 0 && Input.GetTouch(0).phase == TouchPhase.Began
            && !EventSystem.current.IsPointerOverGameObject(Input.GetTouch(0).fingerId))
#endif
        {
            Ray ray = Camera.main.ScreenPointToRay(Input.mousePosition);
            RaycastHit hit;
            var bAxis = Physics.Raycast(ray, out hit, 10000.0f, 2 << 7);
            if (bAxis)
            {
                preHit = hit;
                bHold = true;
                oldLocation = preCollider.transform.position;
                return;
            }
            // Closest intersection point to the current camera
            if (GetMinDist(ray, out hit))
            {
                if (preCollider != null)
                {
                    lineRender.enabled = false;
                }
                // If a different model was hit, clear indexLay
                bPreObject = hit.collider == preCollider;
                if (!bPreObject && preCollider != null)
                {
                    preCollider.gameObject.SetActive(true);
                    indexLay.Clear();
                }
                preCollider = hit.collider;
                // The selected gameObject
                if (checkIsRender(hit.collider, ray))
                {
                    //preCollider.gameObject.SetActive(false);
                    if (selectMat != null)
                    {
                        var haveTexture = ui.SetSelectMat(selectMat);
                        // The current material has textures it can be switched to
                        if (defaultAdd || haveTexture)
                            ui.gameObject.SetActive(true);
                        else
                            ui.gameObject.SetActive(false);
                        // Show the highlight
                        hightLight.ReinitMaterials();
                        hightLight.FlashingOn(2f);
                        if (defaultAdd || haveTexture)
                            hightLight.On();
                        else
                            hightLight.Off();
                    }
                }
            }
        }
#if UNITY_EDITOR
        if (Input.GetMouseButtonUp(0))
#elif UNITY_ANDROID || UNITY_IPHONE
        if (Input.touchCount > 0 && Input.GetTouch(0).phase == TouchPhase.Ended)
#endif
        {
            bHold = false;
        }
    }

    public void FixedUpdate()
    {
        DragAxis();
    }

    public void DragAxis()
    {
#if UNITY_EDITOR
        if (bHold && Input.GetMouseButton(0))
#elif UNITY_ANDROID || UNITY_IPHONE
        if (bHold && Input.touchCount > 0 && Input.GetTouch(0).phase == TouchPhase.Moved)
#endif
        {
            Ray ray = Camera.main.ScreenPointToRay(Input.mousePosition);
            var newPot = ray.origin + ray.direction * preHit.distance - preHit.point;
            preCollider.transform.position = newPot;
        }
    }

    public bool GetMinDist(Ray ray, out RaycastHit rayHit)
    {
        rayHit = new RaycastHit();
        var hits = Physics.RaycastAll(ray);
        var origin = Camera.main.transform.position;
        float minDist = float.MaxValue;
        bool result = false;
        foreach (var hit in hits)
        {
            if (hit.collider == meshCollider || hit.collider == boxCollider)
                continue;
            var sqrLenght = (hit.point - origin).sqrMagnitude;
            if (sqrLenght < minDist)
            {
                minDist = sqrLenght;
                rayHit = hit;
                result = true;
            }
        }
        return result;
    }

    public bool checkIsRender(Collider collider, Ray ray)
    {
        var render = collider.GetComponent<Renderer>();
        var filter = collider.GetComponent<MeshFilter>();
        if (render != null && filter != null)
        {
            // Match the selected renderer's transform
            transform.position = render.transform.position;
            transform.rotation = render.transform.rotation;
            transform.localScale = render.transform.localScale;
            transform.parent = render.transform.parent;
            // Reset state
            float minDist = float.MaxValue;
            AABB minAABB = new AABB();
            cIndex = 0;
            selectMat = null;
            mesh.Clear();
            Collider currentCollider = null;
            mesh.vertices = filter.mesh.vertices;
            mesh.normals = filter.mesh.normals;
            mesh.uv = filter.mesh.uv;
            mesh.uv2 = filter.mesh.uv2;
            // With the lines below enabled, the GI color is correct but the outline breaks
            // (probably UV related). With them disabled, the GI color on the copy is wrong
            // but the outline works.
            // Workaround: put this RayCheck gameObject on its own layer, so the copy itself
            // is never rendered by the main camera, only its outline.
            //meshRender.useLightProbes = render.useLightProbes;
            //meshRender.lightmapIndex = render.lightmapIndex;
            //meshRender.lightmapScaleOffset = render.lightmapScaleOffset;
            //meshRender.realtimeLightmapIndex = render.realtimeLightmapIndex;
            //meshRender.realtimeLightmapScaleOffset = render.realtimeLightmapScaleOffset;
            // If there are several SubMeshes, split each one out, re-test the ray against it,
            // and keep the closest SubMesh.
            if (filter.mesh.subMeshCount > 1)
            {
                for (int meshIndex = 0; meshIndex < filter.mesh.subMeshCount; meshIndex++)
                {
                    meshCollider.enabled = false;
                    boxCollider.enabled = false;
                    var indexs = filter.mesh.GetIndices(meshIndex);
                    mesh.SetIndices(indexs, filter.mesh.GetTopology(meshIndex), 0);
                    if (indexs.Length / 3 > 255)
                    {
                        boxCollider.enabled = true;
                        boxCollider.center = mesh.bounds.center;
                        boxCollider.size = mesh.bounds.size;
                        currentCollider = boxCollider;
                    }
                    else
                    {
                        meshCollider.enabled = true;
                        meshCollider.sharedMesh = mesh;
                        currentCollider = meshCollider;
                    }
                    RaycastHit hit;
                    if (currentCollider.Raycast(ray, out hit, 10000))
                    {
                        float sqrLenght = (Camera.main.transform.position - hit.point).sqrMagnitude;
                        // The model has several SubMeshes, but no matter where we click,
                        // one of them would always be the closest.
                        if (Mathf.Abs(sqrLenght - minDist) < vminDist)
                        {
                            if (!indexLay.Contains(cIndex))
                            {
                                indexLay.Add(cIndex);
                            }
                            if (!indexLay.Contains(meshIndex))
                            {
                                indexLay.Add(meshIndex);
                            }
                        }
                        if (sqrLenght < minDist)
                        {
                            minDist = sqrLenght;
                            cIndex = meshIndex;
                            minAABB.Min = mesh.bounds.center - mesh.bounds.size / 2f;
                            minAABB.Max = mesh.bounds.center + mesh.bounds.size / 2f;
                        }
                    }
                }
            }
            // The model has several SubMeshes whose bounds overlap, so cycle through them.
            if (indexLay.Count > 1 && preCollider)
            {
                if (indexLay.Contains(preIndex))
                {
                    var nIndex = indexLay.IndexOf(preIndex);
                    // Map the cycling position back to the actual SubMesh index.
                    cIndex = indexLay[++nIndex % indexLay.Count];
                }
                preIndex = cIndex;
            }
            if (cIndex >= 0 && render.materials.Length > cIndex)
            {
                selectMat = render.materials[cIndex];
                // Show our own Mesh
                var indexs = filter.mesh.GetIndices(cIndex);
                mesh.SetIndices(indexs, filter.mesh.GetTopology(cIndex), 0);
                meshFilter.mesh = mesh;
                // Apply the selected material
                meshRender.material = selectMat;
                var vertexs = minAABB.LinePositions();
                lineRender.SetVertexCount(vertexs.Length);
                lineRender.SetPositions(vertexs);
                // Reset state
                //lineRender.enabled = true;
                meshRender.enabled = true;
                return true;
            }
        }
        return false;
    }

    public void ApplyMaterial()
    {
        meshRender.enabled = false;
        ui.gameObject.SetActive(false);
    }
}

public class AABB
{
    private bool cornerDirty = true;
    private Vector3 min = Vector3.zero;
    private Vector3 max = Vector3.zero;
    public Vector3[] mCorners = new Vector3[8];

    public Vector3[] Corners
    {
        get
        {
            if (cornerDirty)
            {
                GetAllCorners();
            }
            return mCorners;
        }
    }

    public Vector3 Min
    {
        get { return min; }
        set { min = value; cornerDirty = true; }
    }

    public Vector3 Max
    {
        get { return max; }
        set { max = value; cornerDirty = true; }
    }

    private void makeFloor(Vector3 cmp)
    {
        if (cmp.x < min.x) min.x = cmp.x;
        if (cmp.y < min.y) min.y = cmp.y;
        if (cmp.z < min.z) min.z = cmp.z;
    }

    private void makeCeil(Vector3 cmp)
    {
        if (cmp.x > max.x) max.x = cmp.x;
        if (cmp.y > max.y) max.y = cmp.y;
        if (cmp.z > max.z) max.z = cmp.z;
    }

    public void Merge(Vector3 cmp)
    {
        makeCeil(cmp);
        makeFloor(cmp);
        cornerDirty = true;
    }

    public void SetNull()
    {
        min = Vector3.zero;
        max = Vector3.zero;
        cornerDirty = true;
    }

    /// <summary>
    /// Fill mCorners with the eight corner points of the box.
    /// </summary>
    private void GetAllCorners()
    {
        mCorners[0] = min;
        mCorners[1].x = min.x; mCorners[1].y = max.y; mCorners[1].z = min.z;
        mCorners[2].x = max.x; mCorners[2].y = max.y; mCorners[2].z = min.z;
        mCorners[3].x = max.x; mCorners[3].y = min.y; mCorners[3].z = min.z;
        mCorners[4] = max;
        mCorners[5].x = min.x; mCorners[5].y = max.y; mCorners[5].z = max.z;
        mCorners[6].x = min.x; mCorners[6].y = min.y; mCorners[6].z = max.z;
        mCorners[7].x = max.x; mCorners[7].y = min.y; mCorners[7].z = max.z;
    }

    public Vector3[] LinePositions()
    {
        int i = 0;
        Vector3[] pos = new Vector3[16];
        // Front face
        pos[i++] = this.Corners[0];
        pos[i++] = this.Corners[1];
        pos[i++] = this.Corners[2];
        // Bottom
        pos[i++] = this.Corners[3];
        pos[i++] = this.Corners[0];
        pos[i++] = this.Corners[6];
        // Right
        pos[i++] = this.Corners[7];
        pos[i++] = this.Corners[3];
        pos[i++] = this.Corners[2];
        // Back face
        pos[i++] = this.Corners[4];
        pos[i++] = this.Corners[7];
        pos[i++] = this.Corners[6];
        // Left and top
        pos[i++] = this.Corners[5];
        pos[i++] = this.Corners[1];
        pos[i++] = this.Corners[5];
        pos[i++] = this.Corners[4];
        return pos;
    }

    public AABB Clone()
    {
        AABB ab = new AABB();
        ab.min = this.min;
        ab.max = this.max;
        ab.cornerDirty = true;
        return ab;
    }
}
Points worth noting:
1 A model's SubMeshes may be scattered in different places, so you have to take every RaycastHit, measure the distance from its hit point to the eye, and keep the nearest one.
2 In a LineRenderer, N points form N-1 connected segments, not N/2 separate ones: given A-B-C-D it draws AB, BC, CD, not just AB and CD.
3 The SubMeshes of a model may have overlapping bounds; without special handling you can end up selecting the same SubMesh every time.
4 The new Mesh we build from the selected SubMesh should not be rendered by the main camera (done with a Layer plus cullingMask combination); otherwise it overlaps the original model's SubMesh and neither displays cleanly.
5 Mouse down, checking whether the pointer is over the UI, and mouse up all need different handling on desktop and on mobile.
6 On Android, resources loaded with WWW must be waited on with yield return, so the completion handling can be passed in as a delegate (see the sketch below).
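XmlReader.GetXML itself is not shown in this post; a minimal sketch of the pattern it follows (the class name, signature and parsing callback here are placeholders) might look like this:

using System;
using System.Collections;
using System.Collections.Generic;
using UnityEngine;

// Sketch for note 6: load a file through WWW inside a coroutine and pass the
// "done" work in as a delegate. This only illustrates the shape of such a method.
public static class WwwLoaderSketch
{
    public static IEnumerator GetXML(string fileName,
                                     Dictionary<string, List<string>> target,
                                     Action<string, Dictionary<string, List<string>>> parse,
                                     Action onLoaded)
    {
        // On Android streamingAssetsPath is already a jar:file:// URL;
        // on other platforms a file:// prefix may be needed.
        var url = Application.streamingAssetsPath + "/" + fileName;
        var www = new WWW(url);
        yield return www;                      // must yield; WWW cannot be read synchronously on Android
        if (string.IsNullOrEmpty(www.error))
        {
            parse(www.text, target);           // fill the dictionary from the XML text
            if (onLoaded != null) onLoaded();  // completion callback passed in as a delegate
        }
        else
        {
            Debug.LogError("Load failed: " + www.error);
        }
    }
}
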
That mostly takes care of the model side. Next comes the edge-highlight component, HighlightingSystem, whose approach is also fairly simple.
First, before the main camera renders the scene, the models to be highlighted are moved to a separate layer and their materials are swapped for the highlight color we want. A new camera is then created by copying the main camera; its cullingMask renders only that highlight layer, and those models are rendered into a stencil RTT. Afterwards the highlighted models get their original layer and materials back.
The main camera then renders normally. Once it finishes, OnRenderImage first runs a simple blur over that stencil RTT and stores the result in a blur RTT. Finally the stencil RTT, the blur RTT and the main camera's source image are composited: the stencil itself is not drawn, only the blurred edge that sticks out beyond it.
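As a rough orientation only (this is not the actual HighlightingSystem source), the flow just described could be sketched like this; highlightLayerMask, blurMat, compositeMat and the _StencilTex/_BlurTex property names are placeholders for things the package manages internally:

using UnityEngine;

// Rough sketch of the stencil -> blur -> composite flow described above.
[RequireComponent(typeof(Camera))]
public class HighlightFlowSketch : MonoBehaviour
{
    public LayerMask highlightLayerMask;   // layer the highlighted models are moved to
    public Material blurMat;               // simple blur material
    public Material compositeMat;          // combines source + stencil + blur
    private Camera shaderCamera;
    private RenderTexture stencilBuffer;

    void OnPreRender()
    {
        var cam = GetComponent<Camera>();
        stencilBuffer = RenderTexture.GetTemporary((int)cam.pixelWidth, (int)cam.pixelHeight, 16);
        if (shaderCamera == null)
        {
            shaderCamera = new GameObject("HighlightCam", typeof(Camera)).GetComponent<Camera>();
            shaderCamera.enabled = false;  // rendered manually below
        }
        shaderCamera.CopyFrom(cam);
        shaderCamera.cullingMask = highlightLayerMask;   // only the highlighted models
        shaderCamera.clearFlags = CameraClearFlags.Color;
        shaderCamera.backgroundColor = new Color(0f, 0f, 0f, 0f);
        shaderCamera.targetTexture = stencilBuffer;
        shaderCamera.Render();                           // fills the stencil RTT
    }

    void OnRenderImage(RenderTexture source, RenderTexture dest)
    {
        // Blur the stencil, then composite: only the blurred edge outside the
        // stencil ends up visible on top of the normally rendered image.
        var blurBuffer = RenderTexture.GetTemporary(source.width / 2, source.height / 2, 0);
        Graphics.Blit(stencilBuffer, blurBuffer, blurMat);
        compositeMat.SetTexture("_StencilTex", stencilBuffer);
        compositeMat.SetTexture("_BlurTex", blurBuffer);
        Graphics.Blit(source, dest, compositeMat);
        RenderTexture.ReleaseTemporary(blurBuffer);
        RenderTexture.ReleaseTemporary(stencilBuffer);
    }
}
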
Now for a complication: the boss wanted the highlight hidden on the parts that are occluded, as shown below:
The first image is the current result; the second is what the boss asked for. Honestly, I assumed it would be trivial, and in the end it really only needed a small addition, but a few wrong assumptions about how Unity works cost me time, so here are the pitfalls.
In essence the requirement is just a depth test, so the following code is added on top of the original.

shaderCamera.CopyFrom(refCam);
shaderCamera.projectionMatrix = refCam.projectionMatrix; // Uncomment this line if you have problems using Highlighting System with custom projection matrix on your camera
shaderCamera.cullingMask = layerMask;
shaderCamera.rect = new Rect(0f, 0f, 1f, 1f);
shaderCamera.renderingPath = RenderingPath.Forward;
shaderCamera.hdr = false;
shaderCamera.useOcclusionCulling = false;
shaderCamera.backgroundColor = new Color(0f, 0f, 0f, 0f);
shaderCamera.clearFlags = CameraClearFlags.Color;
shaderCamera.targetTexture = stencilBuffer;
// Since we render the highlight objects directly here, we can compute their depth
// ourselves and don't need an extra depth pre-pass beforehand.
shaderCamera.depthTextureMode = DepthTextureMode.None;
// The culling mask (layerMask) makes this camera render only the highlight objects
shaderCamera.Render();
// Render the depth of the highlight objects into a separate RTT
depthBuffer = RenderTexture.GetTemporary((int)GetComponent<Camera>().pixelWidth, (int)GetComponent<Camera>().pixelHeight, 16, RenderTextureFormat.RHalf);
shaderCamera.targetTexture = depthBuffer;
shaderCamera.RenderWithShader(DepthShader, null);
The depth shader:

Shader "Custom/Render depth buffer" { SubShader { Tags{ "RenderType" = "Opaque" } Pass { ZWrite Off ZTest Always Lighting Off Fog{ Mode Off } CGPROGRAM #pragma vertex vert #pragma fragment frag #include "UnityCG.cginc" //sampler2D_float _LastCameraDepthTexture; //sampler2D_float _CameraDepthTexture; struct v2f { float4 vertex : POSITION; float2 uv : TEXCOORD0; float depth : TEXCOORD1; }; v2f vert(appdata_img v) { v2f o; o.vertex = mul(UNITY_MATRIX_MVP, v.vertex); o.uv = v.texcoord.xy; //float2(v.texcoord.x, 1 - v.texcoord.y); //v.texcoord.xy; o.depth = COMPUTE_DEPTH_01; return o; } float4 frag(v2f i) : COLOR { //float depth = SAMPLE_DEPTH_TEXTURE(_LastCameraDepthTexture, i.uv); //float depthSample = Linear01Depth(SAMPLE_DEPTH_TEXTURE(_LastCameraDepthTexture, i.uv));//_CameraDepthTexture _LastCameraDepthTexture //return float4(depthSample, 0, 0, 0); return float4(i.depth,0,0,0); } ENDCG } } FallBack Off }
Here is what I tried first, and it did not work: before shaderCamera.Render() I set depthTextureMode to Depth and sampled _CameraDepthTexture in the shader, but the result was wrong. Searching around suggested that this RTT always belongs to the main camera. I then switched to _LastCameraDepthTexture, and the result was odd: it did not match the main camera's _CameraDepthTexture at all, the depth values were not simply 0 or 1, yet the rendered output showed no visible depth variation. Thinking it over, those must have been the values left over from the main camera's Graphics.Blit, because that pass only renders a full-screen quad, and visualizing the depth of a quad looks exactly like that.
In the end I dug into Unity 5's built-in shaders to see how the _CameraDepthTexture RTT is filled, and found that the value written into the depth RTT is COMPUTE_DEPTH_01 (see UnityCG.cginc for the exact definition). Since that value is computed purely from the current vertex position, we can simply evaluate it in our own vertex shader.
Then, when producing the first blur image, we compare against the depth RTT rendered above; every place marked with DEPTH_COMP_ON below is newly added.

// Downsamples source texture
private void DownSample4x(RenderTexture source, RenderTexture dest)
{
    float off = 1.0f;
    blurMaterial.SetFloat("_OffsetScale", off);
    blurMaterial.SetTexture("_DepthTex", depthBuffer);
    if (bBlueDepthTest)
    {
        blurMaterial.EnableKeyword("DEPTH_COMP_ON");
    }
    //blurMaterial.DisableKeyword("DEPTH_COMP_OFF");
    Graphics.Blit(source, dest, blurMaterial);
}

Shader "Hidden/Highlighted/Blur" { Properties { _MainTex("", 2D) = "" {} _Intensity("", Range(0.25,0.5)) = 0.3 _DepthTex("", 2D) = "" {} } SubShader { Pass { ZTest Always Cull Off ZWrite Off Lighting Off Fog { Mode Off } CGPROGRAM #pragma vertex vert #pragma fragment frag #pragma fragmentoption ARB_precision_hint_fastest #pragma multi_compile __ DEPTH_COMP_ON //DEPTH_COMP_OFF #include "UnityCG.cginc" uniform sampler2D _MainTex; uniform half4 _MainTex_TexelSize; uniform half _OffsetScale; uniform fixed _Intensity; #if defined(DEPTH_COMP_ON) uniform sampler2D _DepthTex; sampler2D_float _CameraDepthTexture; #endif struct v2f { float4 pos : POSITION; half2 duv : TEXCOORD0; half2 uv[4] : TEXCOORD1; }; v2f vert(appdata_img v) { // Shader code optimized for the Unity shader compiler v2f o; o.pos = mul(UNITY_MATRIX_MVP, v.vertex); half2 offs = _MainTex_TexelSize.xy * _OffsetScale; o.uv[0].x = v.texcoord.x - offs.x; o.uv[0].y = v.texcoord.y - offs.y; o.uv[1].x = v.texcoord.x + offs.x; o.uv[1].y = v.texcoord.y - offs.y; o.uv[2].x = v.texcoord.x + offs.x; o.uv[2].y = v.texcoord.y + offs.y; o.uv[3].x = v.texcoord.x - offs.x; o.uv[3].y = v.texcoord.y + offs.y; o.duv = v.texcoord.xy; if (_MainTex_TexelSize.y < 0) o.duv.y = 1 - o.duv.y; return o; } fixed4 frag(v2f i) : COLOR { fixed4 color1 = tex2D(_MainTex, i.uv[0]); fixed4 color2 = tex2D(_MainTex, i.uv[1]); fixed4 color3 = tex2D(_MainTex, i.uv[2]); fixed4 color4 = tex2D(_MainTex, i.uv[3]); fixed4 color; color.rgb = max(color1.rgb, color2.rgb); color.rgb = max(color.rgb, color3.rgb); color.rgb = max(color.rgb, color4.rgb); color.a = (color1.a + color2.a + color3.a + color4.a) * _Intensity; #if defined(DEPTH_COMP_ON) float cDepth = Linear01Depth(SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture, i.duv)); float oDepth = tex2D(_DepthTex, i.duv).r; //當Blur楨的深度 大於 當前楨的深度 if (abs(oDepth - cDepth) > 0.001) { color.rgba = fixed4(0,0,0,0); } #endif return color; } ENDCG } } Fallback off }
Notes:
1 We only need the depth comparison when producing the first blur image; the later blur passes simply re-blur that result. That is why the shader defines a compile keyword, so the first pass and the following passes take different code paths.
2 In the depth-comparison shader we can no longer read the original vertex positions, because we are not rendering the original models any more; this is the equivalent of Ogre's post-processing PassQuad (it only draws a full-screen quad). So beforehand the main camera's depthTextureMode has to be set to Depth; then, after OnPreRender and before the main camera's normal rendering, Unity runs UpdateDepthTexture and writes the depth of every model in the scene into _CameraDepthTexture, which is what lets us read correct depth values later in OnRenderImage (see the snippet after this list).
3 Both depth textures here have only 16-bit precision by default, so the comparison needs a tolerance range (the 0.001 above) rather than an exact equality test.
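For note 2, the key step is the depthTextureMode assignment; where exactly it runs (the highlight controller's setup, a small helper like the sketch below, etc.) depends on your project, and the component name here is just a placeholder.

using UnityEngine;

// Sketch for note 2: make sure the main camera produces _CameraDepthTexture.
[RequireComponent(typeof(Camera))]
public class EnableSceneDepth : MonoBehaviour
{
    void OnEnable()
    {
        // Ask Unity to render scene depth into _CameraDepthTexture before the
        // camera's normal pass, so shaders used in OnRenderImage can sample it.
        GetComponent<Camera>().depthTextureMode |= DepthTextureMode.Depth;
    }
}
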
Several of the failed attempts came down to not being clear about Unity's execution order; it was only with the help of Unity's Frame Debugger that I finally got this simple change working.