// SSAO.hlsl — Unity URP Screen Space Ambient Occlusion shader include.
  1. #ifndef UNIVERSAL_SSAO_INCLUDED
  2. #define UNIVERSAL_SSAO_INCLUDED
  3. // Includes
  4. #include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Common.hlsl"
  5. #include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/ShaderVariablesFunctions.hlsl"
  6. #include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/DeclareDepthTexture.hlsl"
  7. #include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/DeclareNormalsTexture.hlsl"
  8. // Textures & Samplers
  9. TEXTURE2D_X(_BaseMap);
  10. TEXTURE2D_X(_ScreenSpaceOcclusionTexture);
  11. SAMPLER(sampler_BaseMap);
  12. SAMPLER(sampler_ScreenSpaceOcclusionTexture);
  13. // Params
  14. float4 _BlurOffset;
  15. float4 _SSAOParams;
  16. float4 _SourceSize;
  17. // SSAO Settings
  18. #define INTENSITY _SSAOParams.x
  19. #define RADIUS _SSAOParams.y
  20. #define DOWNSAMPLE _SSAOParams.z
  21. // GLES2: In many cases, dynamic looping is not supported.
  22. #if defined(SHADER_API_GLES) && !defined(SHADER_API_GLES3)
  23. #define SAMPLE_COUNT 3
  24. #else
  25. #define SAMPLE_COUNT _SSAOParams.w
  26. #endif
  27. // Function defines
  28. #define SCREEN_PARAMS GetScaledScreenParams()
  29. #define SAMPLE_BASEMAP(uv) SAMPLE_TEXTURE2D_X(_BaseMap, sampler_BaseMap, UnityStereoTransformScreenSpaceTex(uv));
  30. #define SAMPLE_BASEMAP_R(uv) SAMPLE_TEXTURE2D_X(_BaseMap, sampler_BaseMap, UnityStereoTransformScreenSpaceTex(uv)).r;
  31. // Constants
  32. // kContrast determines the contrast of occlusion. This allows users to control over/under
  33. // occlusion. At the moment, this is not exposed to the editor because it's rarely useful.
  34. static const float kContrast = 0.6;
  35. // The constant below controls the geometry-awareness of the bilateral
  36. // filter. The higher value, the more sensitive it is.
  37. static const float kGeometryCoeff = 0.8;
  38. // The constants below are used in the AO estimator. Beta is mainly used for suppressing
  39. // self-shadowing noise, and Epsilon is used to prevent calculation underflow. See the
  40. // paper (Morgan 2011 http://goo.gl/2iz3P) for further details of these constants.
  41. static const float kBeta = 0.002;
  42. #define EPSILON 1.0e-4
  43. float4 PackAONormal(float ao, float3 n)
  44. {
  45. return float4(ao, n * 0.5 + 0.5);
  46. }
  47. float3 GetPackedNormal(float4 p)
  48. {
  49. return p.gba * 2.0 - 1.0;
  50. }
  51. float GetPackedAO(float4 p)
  52. {
  53. return p.r;
  54. }
  55. float EncodeAO(float x)
  56. {
  57. #if UNITY_COLORSPACE_GAMMA
  58. return 1.0 - max(LinearToSRGB(1.0 - saturate(x)), 0.0);
  59. #else
  60. return x;
  61. #endif
  62. }
  63. float CompareNormal(float3 d1, float3 d2)
  64. {
  65. return smoothstep(kGeometryCoeff, 1.0, dot(d1, d2));
  66. }
  67. float2 GetScreenSpacePosition(float2 uv)
  68. {
  69. return uv * SCREEN_PARAMS.xy * DOWNSAMPLE;
  70. }
  71. // Trigonometric function utility
  72. float2 CosSin(float theta)
  73. {
  74. float sn, cs;
  75. sincos(theta, sn, cs);
  76. return float2(cs, sn);
  77. }
  78. // Pseudo random number generator with 2D coordinates
  79. float UVRandom(float u, float v)
  80. {
  81. float f = dot(float2(12.9898, 78.233), float2(u, v));
  82. return frac(43758.5453 * sin(f));
  83. }
  84. // Sample point picker
  85. float3 PickSamplePoint(float2 uv, float randAddon, int index)
  86. {
  87. float2 positionSS = GetScreenSpacePosition(uv);
  88. float gn = InterleavedGradientNoise(positionSS, index);
  89. float u = frac(UVRandom(0.0, index + randAddon) + gn) * 2.0 - 1.0;
  90. float theta = (UVRandom(1.0, index + randAddon) + gn) * TWO_PI;
  91. return float3(CosSin(theta) * sqrt(1.0 - u * u), u);
  92. }
  93. float RawToLinearDepth(float rawDepth)
  94. {
  95. #if defined(_ORTHOGRAPHIC)
  96. #if UNITY_REVERSED_Z
  97. return ((_ProjectionParams.z - _ProjectionParams.y) * (1.0 - rawDepth) + _ProjectionParams.y);
  98. #else
  99. return ((_ProjectionParams.z - _ProjectionParams.y) * (rawDepth) + _ProjectionParams.y);
  100. #endif
  101. #else
  102. return LinearEyeDepth(rawDepth, _ZBufferParams);
  103. #endif
  104. }
  105. float SampleAndGetLinearDepth(float2 uv)
  106. {
  107. float rawDepth = SampleSceneDepth(uv.xy).r;
  108. return RawToLinearDepth(rawDepth);
  109. }
  110. float3 ReconstructViewPos(float2 uv, float depth, float2 p11_22, float2 p13_31)
  111. {
  112. #if defined(_ORTHOGRAPHIC)
  113. float3 viewPos = float3(((uv.xy * 2.0 - 1.0 - p13_31) * p11_22), depth);
  114. #else
  115. float3 viewPos = float3(depth * ((uv.xy * 2.0 - 1.0 - p13_31) * p11_22), depth);
  116. #endif
  117. return viewPos;
  118. }
  119. // Try reconstructing normal accurately from depth buffer.
  120. // Low: DDX/DDY on the current pixel
  121. // Medium: 3 taps on each direction | x | * | y |
  122. // High: 5 taps on each direction: | z | x | * | y | w |
  123. // https://atyuwen.github.io/posts/normal-reconstruction/
  124. // https://wickedengine.net/2019/09/22/improved-normal-reconstruction-from-depth/
  125. float3 ReconstructNormal(float2 uv, float depth, float3 vpos, float2 p11_22, float2 p13_31)
  126. {
  127. #if defined(_RECONSTRUCT_NORMAL_LOW)
  128. return normalize(cross(ddy(vpos), ddx(vpos)));
  129. #else
  130. float2 delta = _SourceSize.zw * 2.0;
  131. // Sample the neighbour fragments
  132. float2 lUV = float2(-delta.x, 0.0);
  133. float2 rUV = float2( delta.x, 0.0);
  134. float2 uUV = float2(0.0, delta.y);
  135. float2 dUV = float2(0.0, -delta.y);
  136. float3 l1 = float3(uv + lUV, 0.0); l1.z = SampleAndGetLinearDepth(l1.xy); // Left1
  137. float3 r1 = float3(uv + rUV, 0.0); r1.z = SampleAndGetLinearDepth(r1.xy); // Right1
  138. float3 u1 = float3(uv + uUV, 0.0); u1.z = SampleAndGetLinearDepth(u1.xy); // Up1
  139. float3 d1 = float3(uv + dUV, 0.0); d1.z = SampleAndGetLinearDepth(d1.xy); // Down1
  140. // Determine the closest horizontal and vertical pixels...
  141. // horizontal: left = 0.0 right = 1.0
  142. // vertical : down = 0.0 up = 1.0
  143. #if defined(_RECONSTRUCT_NORMAL_MEDIUM)
  144. uint closest_horizontal = l1.z > r1.z ? 0 : 1;
  145. uint closest_vertical = d1.z > u1.z ? 0 : 1;
  146. #else
  147. float3 l2 = float3(uv + lUV * 2.0, 0.0); l2.z = SampleAndGetLinearDepth(l2.xy); // Left2
  148. float3 r2 = float3(uv + rUV * 2.0, 0.0); r2.z = SampleAndGetLinearDepth(r2.xy); // Right2
  149. float3 u2 = float3(uv + uUV * 2.0, 0.0); u2.z = SampleAndGetLinearDepth(u2.xy); // Up2
  150. float3 d2 = float3(uv + dUV * 2.0, 0.0); d2.z = SampleAndGetLinearDepth(d2.xy); // Down2
  151. const uint closest_horizontal = abs( (2.0 * l1.z - l2.z) - depth) < abs( (2.0 * r1.z - r2.z) - depth) ? 0 : 1;
  152. const uint closest_vertical = abs( (2.0 * d1.z - d2.z) - depth) < abs( (2.0 * u1.z - u2.z) - depth) ? 0 : 1;
  153. #endif
  154. // Calculate the triangle, in a counter-clockwize order, to
  155. // use based on the closest horizontal and vertical depths.
  156. // h == 0.0 && v == 0.0: p1 = left, p2 = down
  157. // h == 1.0 && v == 0.0: p1 = down, p2 = right
  158. // h == 1.0 && v == 1.0: p1 = right, p2 = up
  159. // h == 0.0 && v == 1.0: p1 = up, p2 = left
  160. // Calculate the view space positions for the three points...
  161. float3 P1;
  162. float3 P2;
  163. if (closest_vertical == 0)
  164. {
  165. P1 = closest_horizontal == 0 ? l1 : d1;
  166. P2 = closest_horizontal == 0 ? d1 : r1;
  167. }
  168. else
  169. {
  170. P1 = closest_horizontal == 0 ? u1 : r1;
  171. P2 = closest_horizontal == 0 ? l1 : u1;
  172. }
  173. P1 = ReconstructViewPos(P1.xy, P1.z, p11_22, p13_31);
  174. P2 = ReconstructViewPos(P2.xy, P2.z, p11_22, p13_31);
  175. // Use the cross product to calculate the normal...
  176. return normalize(cross(P2 - vpos, P1 - vpos));
  177. #endif
  178. }
  179. void SampleDepthNormalView(float2 uv, float2 p11_22, float2 p13_31, out float depth, out float3 normal, out float3 vpos)
  180. {
  181. depth = SampleAndGetLinearDepth(uv);
  182. vpos = ReconstructViewPos(uv, depth, p11_22, p13_31);
  183. #if defined(_SOURCE_DEPTH_NORMALS)
  184. normal = SampleSceneNormals(uv);
  185. #else
  186. normal = ReconstructNormal(uv, depth, vpos, p11_22, p13_31);
  187. #endif
  188. }
  189. float3x3 GetCoordinateConversionParameters(out float2 p11_22, out float2 p13_31)
  190. {
  191. float3x3 camProj = (float3x3)unity_CameraProjection;
  192. p11_22 = rcp(float2(camProj._11, camProj._22));
  193. p13_31 = float2(camProj._13, camProj._23);
  194. return camProj;
  195. }
  196. // Distance-based AO estimator based on Morgan 2011
  197. // "Alchemy screen-space ambient obscurance algorithm"
  198. // http://graphics.cs.williams.edu/papers/AlchemyHPG11/
  199. float4 SSAO(Varyings input) : SV_Target
  200. {
  201. UNITY_SETUP_STEREO_EYE_INDEX_POST_VERTEX(input);
  202. float2 uv = input.uv;
  203. // Parameters used in coordinate conversion
  204. float2 p11_22, p13_31;
  205. float3x3 camProj = GetCoordinateConversionParameters(p11_22, p13_31);
  206. // Get the depth, normal and view position for this fragment
  207. float depth_o;
  208. float3 norm_o;
  209. float3 vpos_o;
  210. SampleDepthNormalView(uv, p11_22, p13_31, depth_o, norm_o, vpos_o);
  211. // This was added to avoid a NVIDIA driver issue.
  212. float randAddon = uv.x * 1e-10;
  213. float rcpSampleCount = rcp(SAMPLE_COUNT);
  214. float ao = 0.0;
  215. for (int s = 0; s < int(SAMPLE_COUNT); s++)
  216. {
  217. #if defined(SHADER_API_D3D11)
  218. // This 'floor(1.0001 * s)' operation is needed to avoid a DX11 NVidia shader issue.
  219. s = floor(1.0001 * s);
  220. #endif
  221. // Sample point
  222. float3 v_s1 = PickSamplePoint(uv, randAddon, s);
  223. // Make it distributed between [0, _Radius]
  224. v_s1 *= sqrt((s + 1.0) * rcpSampleCount ) * RADIUS;
  225. v_s1 = faceforward(v_s1, -norm_o, v_s1);
  226. float3 vpos_s1 = vpos_o + v_s1;
  227. // Reproject the sample point
  228. float3 spos_s1 = mul(camProj, vpos_s1);
  229. #if defined(_ORTHOGRAPHIC)
  230. float2 uv_s1_01 = clamp((spos_s1.xy + 1.0) * 0.5, 0.0, 1.0);
  231. #else
  232. float2 uv_s1_01 = clamp((spos_s1.xy * rcp(vpos_s1.z) + 1.0) * 0.5, 0.0, 1.0);
  233. #endif
  234. // Depth at the sample point
  235. float depth_s1 = SampleAndGetLinearDepth(uv_s1_01);
  236. // Relative position of the sample point
  237. float3 vpos_s2 = ReconstructViewPos(uv_s1_01, depth_s1, p11_22, p13_31);
  238. float3 v_s2 = vpos_s2 - vpos_o;
  239. // Estimate the obscurance value
  240. float a1 = max(dot(v_s2, norm_o) - kBeta * depth_o, 0.0);
  241. float a2 = dot(v_s2, v_s2) + EPSILON;
  242. ao += a1 * rcp(a2);
  243. }
  244. // Intensity normalization
  245. ao *= RADIUS;
  246. // Apply contrast
  247. ao = PositivePow(ao * INTENSITY * rcpSampleCount, kContrast);
  248. return PackAONormal(ao, norm_o);
  249. }
  250. // Geometry-aware separable bilateral filter
  251. half4 Blur(float2 uv, float2 delta) : SV_Target
  252. {
  253. float4 p0 = SAMPLE_BASEMAP(uv );
  254. float4 p1a = SAMPLE_BASEMAP(uv - delta * 1.3846153846);
  255. float4 p1b = SAMPLE_BASEMAP(uv + delta * 1.3846153846);
  256. float4 p2a = SAMPLE_BASEMAP(uv - delta * 3.2307692308);
  257. float4 p2b = SAMPLE_BASEMAP(uv + delta * 3.2307692308);
  258. #if defined(BLUR_SAMPLE_CENTER_NORMAL)
  259. #if defined(_SOURCE_DEPTH_NORMALS)
  260. float3 n0 = SampleSceneNormals(uv);
  261. #else
  262. float2 p11_22, p13_31;
  263. float3x3 camProj = GetCoordinateConversionParameters(p11_22, p13_31);
  264. // Get the depth, normal and view position for this fragment
  265. float depth_o;
  266. float3 n0;
  267. float3 vpos_o;
  268. SampleDepthNormalView(uv, p11_22, p13_31, depth_o, n0, vpos_o);
  269. #endif
  270. #else
  271. float3 n0 = GetPackedNormal(p0);
  272. #endif
  273. float w0 = 0.2270270270;
  274. float w1a = CompareNormal(n0, GetPackedNormal(p1a)) * 0.3162162162;
  275. float w1b = CompareNormal(n0, GetPackedNormal(p1b)) * 0.3162162162;
  276. float w2a = CompareNormal(n0, GetPackedNormal(p2a)) * 0.0702702703;
  277. float w2b = CompareNormal(n0, GetPackedNormal(p2b)) * 0.0702702703;
  278. float s;
  279. s = GetPackedAO(p0) * w0;
  280. s += GetPackedAO(p1a) * w1a;
  281. s += GetPackedAO(p1b) * w1b;
  282. s += GetPackedAO(p2a) * w2a;
  283. s += GetPackedAO(p2b) * w2b;
  284. s *= rcp(w0 + w1a + w1b + w2a + w2b);
  285. return PackAONormal(s, n0);
  286. }
  287. // Geometry-aware bilateral filter (single pass/small kernel)
  288. float BlurSmall(float2 uv, float2 delta)
  289. {
  290. float4 p0 = SAMPLE_BASEMAP(uv );
  291. float4 p1 = SAMPLE_BASEMAP(uv + float2(-delta.x, -delta.y));
  292. float4 p2 = SAMPLE_BASEMAP(uv + float2( delta.x, -delta.y));
  293. float4 p3 = SAMPLE_BASEMAP(uv + float2(-delta.x, delta.y));
  294. float4 p4 = SAMPLE_BASEMAP(uv + float2( delta.x, delta.y));
  295. float3 n0 = GetPackedNormal(p0);
  296. float w0 = 1.0;
  297. float w1 = CompareNormal(n0, GetPackedNormal(p1));
  298. float w2 = CompareNormal(n0, GetPackedNormal(p2));
  299. float w3 = CompareNormal(n0, GetPackedNormal(p3));
  300. float w4 = CompareNormal(n0, GetPackedNormal(p4));
  301. float s;
  302. s = GetPackedAO(p0) * w0;
  303. s += GetPackedAO(p1) * w1;
  304. s += GetPackedAO(p2) * w2;
  305. s += GetPackedAO(p3) * w3;
  306. s += GetPackedAO(p4) * w4;
  307. return s *= rcp(w0 + w1 + w2 + w3 + w4);
  308. }
  309. half4 HorizontalBlur(Varyings input) : SV_Target
  310. {
  311. UNITY_SETUP_STEREO_EYE_INDEX_POST_VERTEX(input);
  312. float2 uv = input.uv;
  313. float2 delta = float2(_SourceSize.z * rcp(DOWNSAMPLE) * 2.0, 0.0);
  314. return Blur(uv, delta);
  315. }
  316. half4 VerticalBlur(Varyings input) : SV_Target
  317. {
  318. UNITY_SETUP_STEREO_EYE_INDEX_POST_VERTEX(input);
  319. float2 uv = input.uv;
  320. float2 delta = float2(0.0, _SourceSize.w * rcp(DOWNSAMPLE) * 2.0);
  321. return Blur(uv, delta);
  322. }
  323. half4 FinalBlur(Varyings input) : SV_Target
  324. {
  325. UNITY_SETUP_STEREO_EYE_INDEX_POST_VERTEX(input);
  326. float2 uv = input.uv;
  327. float2 delta = _SourceSize.zw * rcp(DOWNSAMPLE);
  328. return 1.0 - BlurSmall(uv, delta );
  329. }
  330. #endif //UNIVERSAL_SSAO_INCLUDED