Quantcast
Channel: Questions in topic: "hlsl"
Viewing all 206 articles
Browse latest View live

How can I get correct lighting on a low poly water shader?

$
0
0
I have been working on this low poly water shader and I got each vertex to move up and down like I want to, I also got the shader to be transparent but I am having problems with lighting. If the metallic is set to 1, it is a solid color and I cant tell that the vertices are even moving in the center. The light hits the plane as if it was completely flat and leaves an object that was in the center a shadow of a flat line and not the way it is supposed to be. I am very new to shader writing so please correct me on anything else I did wrong. Here is my code: Shader "Custom/LowPolyWater" { Properties { _Color ("Color", Color) = (1,1,1,1) _MainTex("Albedo (RGB)", 2D) = "white" {} _Glossiness("Smoothness", Range(0,1)) = 0 _Metallic("Metallic", Range(0,1)) = 0 _Speed("Speed", Range(0, 5)) = 1 _Scale("Scale", Range(0, 3)) = 0.3 _Amount("Amount", Range(0, 0.5)) = 0.1 } SubShader { Tags { "RenderType"="Transparent" "Queue"="Transparent" } LOD 200 CGPROGRAM // Physically based Standard lighting model, and enable shadows on all light types #pragma surface surf Standard fullforwardshadows alpha #pragma vertex vert // Use shader model 3.0 target, to get nicer looking lighting #pragma target 3.0 sampler2D _MainTex; half _Glossiness; half _Metallic; fixed4 _Color; half _Speed; half _Scale; fixed _Amount; struct Input { float2 uv_MainTex; }; void vert(inout appdata_full v, out Input o) { //Idk why i usually need this but just in case UNITY_INITIALIZE_OUTPUT(Input, o); //I basically plugged functions and numbers in until something worked... 
my favorite meathod v.vertex.y = (sin((_Time.w * _Speed) + v.vertex.x / _Amount) + sin((_Time.w * _Speed) + v.vertex.z / _Amount)) * _Scale; } void surf (Input IN, inout SurfaceOutputStandard o) { // Albedo comes from a texture tinted by color fixed4 c = tex2D (_MainTex, IN.uv_MainTex) * _Color; o.Albedo = c.rgb; // Metallic and smoothness come from slider variables o.Metallic = _Metallic; o.Smoothness = _Glossiness; o.Alpha = _Color.a; } ENDCG } FallBack "Diffuse" }

doubles and compute shaders

$
0
0
I have a compute shader I use to crunch the n-body gravity calculations for my project. When I use all floats in the shader, it runs fine and can process the gravity calculations of 10,000 objects in about 8 ms. However, I can't use floats because part of the gravity equation ((G x mass1 x mass2) / d^2) can produce a number greater than what floats can hold with 2 sun sized masses. This leads me to need to use doubles for that part of the calculations. This wouldn't be a problem, except it seems to SEVERELY increase the time it takes to execute the shader from 8 ms to 130 ms. Any input is appreciated. [numthreads(256,1,1)] void GravityComp (uint3 id : SV_DispatchThreadID) { uint ind = id.x; float2 gravResult = float3(0, 0); for (uint i = 0; i < (uint)numAsteroids; i++) { if (ind == i) continue; float distance = Distance(dataIn[ind].xy, dataIn[i].xy); double G = (double)0.0000000000667408; double m1 = (double)dataIn[ind].z; // mass double m2 = (double)dataIn[i].z; // mass double newt = (G * m1 * m2) / (double)pow(distance, 2); float acc = (float)(newt / m1); float2 dir = -normalize(dataIn[ind].xy - dataIn[i].xy); float2 grav = dir.xy * acc; gravResult.xy = gravResult.xy + grav.xy; } dataOut[ind].xy = gravResult.xy; }

[Shader] Usage #ifdef DIRECTIONAL

$
0
0
How can I use #ifdef DIRECTIONAL in surf + vert shader? What conditions? M.b. some .cgiinc or .include or HLSLPROGRAM only etc here is full shader Shader "mitay/cutout tree" { Properties { _Color ("Color", Color) = (1,1,1,1) _SpecColor ("Specular Color", Color) = (0.1, 0.1, 0.1, 1) _MainTex ("Albedo (RGB)", 2D) = "white" {} _BumpMap ("Bump (RGB)", 2D) = "bump" {} _Smoothness ("Smoothness", Range(0.001,1)) = 1 _Cutoff ("Alpha cutoff", Range(0.25,0.9)) = 0.5 [MaterialToggle] _isToggled("ShakeDirection1", Float) = 0 [MaterialToggle] _isToggled2("ShakeDirec tion2", Float) = 0 _ShakeDisplacement ("Displacement", Range (0, 1.0)) = 1.0 _ShakeTime ("Shake Time", Range (0, 1.0)) = 1.0 _ShakeWindspeed ("Shake Windspeed", Range (0, 1.0)) = 1.0 _ShakeBending ("Shake Bending", Range (0, 1.0)) = 0.2 // These are here only to provide default values [HideInInspector] _TreeInstanceColor ("TreeInstanceColor", Vector) = (1,1,1,1) [HideInInspector] _TreeInstanceScale ("TreeInstanceScale", Vector) = (1,1,1,1) [HideInInspector] _SquashAmount ("Squash", Float) = 1 } SubShader { Tags { "RenderType"="TreeTransparentCutout" } LOD 200 Cull Off CGPROGRAM // add "addshadow" to let unity know you're displacing verts // this will ensure their ShadowCaster + ShadowCollector passes use the vert function and have the correct positions #pragma surface surf BlinnPhong fullforwardshadows vertex:vert addshadow alphatest:_Cutoff //#include "UnityBuiltin2xTreeLibrary.cginc" #pragma target 3.0 float _isToggled; float _isToggled2; sampler2D _MainTex; sampler2D _BumpMap; fixed4 _Color; half _Smoothness; half _Glossiness; half _Speed; half _Amount; half _Distance; float _ShakeDisplacement; float _ShakeTime; float _ShakeWindspeed; float _ShakeBending; fixed4 _TreeInstanceColor; float4 _TreeInstanceScale; float4x4 _TerrainEngineBendTree; float4 _SquashPlaneNormal; float _SquashAmount; struct Input { float2 uv_MainTex; float2 uv_BumpMap; }; fixed4 LightingNormalizedBlinnPhong (SurfaceOutput s, fixed3 lightDir, 
fixed3 halfDir, fixed atten) { // TODO: conditional normalization using ifdef fixed3 nN = normalize(s.Normal); fixed diff = max( 0, dot(nN, lightDir) ); fixed nh = max( 0, dot(nN, halfDir) ); fixed spec = pow(nh, s.Specular*128) * s.Gloss; fixed4 c; c.rgb = _LightColor0.rgb * (s.Albedo * diff + spec) * atten; UNITY_OPAQUE_ALPHA(c.a); return c; } void FastSinCos (float4 val, out float4 s, out float4 c) { val = val * 6.408849 - 3.1415927; float4 r5 = val * val; float4 r6 = r5 * r5; float4 r7 = r6 * r5; float4 r8 = r6 * r5; float4 r1 = r5 * val; float4 r2 = r1 * r5; float4 r3 = r2 * r5; float4 sin7 = {1, -0.16161616, 0.0083333, -0.00019841} ; float4 cos8 = {-0.5, 0.041666666, -0.0013888889, 0.000024801587} ; s = val + r1 * sin7.y + r2 * sin7.z + r3 * sin7.w; c = 1 + r5 * cos8.x + r6 * cos8.y + r7 * cos8.z + r8 * cos8.w; } inline float4 Squash(in float4 pos) { // To squash the tree the vertex needs to be moved in the direction // of the squash plane. The plane is defined by the the: // plane point - point lying on the plane, defined in model space // plane normal - _SquashPlaneNormal.xyz // we're pushing squashed tree plane in direction of planeNormal by amount of _SquashPlaneNormal.w // this squashing has to match logic of tree billboards float3 planeNormal = _SquashPlaneNormal.xyz; // unoptimized version: //float3 planePoint = -planeNormal * _SquashPlaneNormal.w; //float3 projectedVertex = pos.xyz + dot(planeNormal, (planePoint - pos)) * planeNormal; // optimized version: float3 projectedVertex = pos.xyz - (dot(planeNormal.xyz, pos.xyz) + _SquashPlaneNormal.w) * planeNormal; pos = float4(lerp(projectedVertex, pos.xyz, _SquashAmount), 1); return pos; } void TerrainAnimateTree( inout float4 pos, float alpha ) { pos.xyz *= _TreeInstanceScale.xyz; float3 bent = mul(_TerrainEngineBendTree, float4(pos.xyz, 0.0)).xyz; pos.xyz = lerp( pos.xyz, bent, alpha ); pos = Squash(pos); } void vert (inout appdata_full v) { float factor = (1 - _ShakeDisplacement) * 0.5; const float 
_WindSpeed = (_ShakeWindspeed); const float _WaveScale = _ShakeDisplacement; const float4 _waveXSize = float4(0.048, 0.06, 0.24, 0.096); const float4 _waveZSize = float4 (0.024, .08, 0.08, 0.2); const float4 waveSpeed = float4 (1.2, 2, 1.6, 4.8); float4 _waveXmove = float4(0.024, 0.04, -0.12, 0.096); float4 _waveZmove = float4 (0.006, .02, -0.02, 0.1); float4 waves; waves = v.vertex.x * _waveXSize; waves += v.vertex.z * _waveZSize; waves += _Time.x * (1 - _ShakeTime * 2 - v.color.b ) * waveSpeed *_WindSpeed; float4 s, c; waves = frac (waves); FastSinCos (waves, s,c); float waveAmount = 1; if (_isToggled > 0) waveAmount = v.texcoord.y * (v.color.a + _ShakeBending); else waveAmount = v.texcoord.x * (v.color.a + _ShakeBending); s *= waveAmount; s *= normalize (waveSpeed); s = s * s; float fade = dot (s, 1.3); s = s * s; float3 waveMove = float3 (0,0,0); waveMove.x = dot (s, _waveXmove); waveMove.z = dot (s, _waveZmove); v.vertex.xz -= mul ((float3x3)unity_WorldToObject, waveMove).xz; v.color *= _TreeInstanceColor; float3 viewpos = mul(UNITY_MATRIX_MV, v.vertex); #ifdef DIRECTIONAL float3 viewpos = mul(UNITY_MATRIX_MV, v.vertex); float4 lightDir = 0; float4 lightColor = 0; lightDir.w = _AO; float4 light = UNITY_LIGHTMODEL_AMBIENT; for (int i = 0; i < 4; i++) { float atten = 1.0; #ifdef USE_CUSTOM_LIGHT_DIR lightDir.xyz = _TerrainTreeLightDirections[i]; lightColor = _TerrainTreeLightColors[i]; #else float3 toLight = unity_LightPosition[i].xyz - viewpos.xyz * unity_LightPosition[i].w; toLight.z *= -1.0; lightDir.xyz = mul( (float3x3)unity_CameraToWorld, normalize(toLight) ); float lengthSq = dot(toLight, toLight); atten = 1.0 / (1.0 + lengthSq * unity_LightAtten[i].z); lightColor.rgb = unity_LightColor[i].rgb; #endif lightDir.xyz *= _Occlusion; float occ = dot (v.tangent, lightDir); occ = max(0, occ); occ += _BaseLight; light += lightColor * (occ * atten); } v.color = light * _Color.rgb * _TreeInstanceColor; #endif TerrainAnimateTree(v.vertex, v.color.w); } void surf 
(Input IN, inout SurfaceOutput o) { fixed4 tex = tex2D (_MainTex, IN.uv_MainTex) * _Color; o.Albedo = tex.rgb ; o.Gloss = tex.a; o.Alpha = tex.a * _Color.a; o.Specular = _Smoothness; o.Normal = UnpackNormal(tex2D(_BumpMap, IN.uv_BumpMap)); } ENDCG } Dependency "BillboardShader" = "Hidden/Nature/Tree Soft Occlusion Leaves Rendertex" FallBack "Diffuse" }

Hlsl Pow function platform-dependent error

$
0
0
Hi guys, it looks like the pow function does crazy things if the exponent is too big. For example, if it's 10 it causes the shader to simply return 0, regardless of the result of the operation. The interesting point is, how can this be dependent on the OS (it actually is; the error does not happen on Linux or Mac)? And it also depends on the project it is in: the shader works correctly in one project, and gives the error in another.

Webgl 1.0 error shader loop, workaround?

$
0
0
It seems WebGL doesn't support some kinds of loops in the HLSL shader. My shader only has this variant. I want to know a workaround for this problem if possible, and whether these issues will be solved in the future, for example in the coming WebGL 2.0. My shader uses a loop to iterate through points to print decals; it is a fragment shader. The error log is ERROR: 0:37: 'while' : This type of loop is not allowed. I already looked at https://docs.unity3d.com/Manual/webgl-graphics.html

Why does this shader give different result Editor/ Android?

$
0
0
Hi, I've written a simple *curved* unlit shader in HLSL that basically translates each vertex based on the distance to the camera (the magic happens in the vertex shader): Properties { _BendFactor ("Bend Factor", Vector) = (0, 0, 0, 0) _Color ("Color", Color) = (1,1,1,1) _MainTex ("Texture", 2D) = "white" {} } SubShader { Tags { "RenderType"="Opaque" } LOD 100 Lighting Off Pass { CGPROGRAM #pragma vertex vert #pragma fragment frag #include "UnityCG.cginc" struct appdata { float4 vertex : POSITION; float2 uv_tex1 : TEXCOORD0; }; struct v2f { float2 uv_tex1 : TEXCOORD0; float4 vertex : SV_POSITION; }; fixed4 _BendFactor; fixed4 _Color; sampler2D _MainTex; float4 _MainTex_ST; v2f vert (appdata v) { v2f o; // here I calculate the offset fixed4 offset = mul( unity_ObjectToWorld, v.vertex ); offset.xyz -= _WorldSpaceCameraPos.xyz; offset =_BendFactor * (offset.z * offset.z); o.vertex = UnityObjectToClipPos ( v.vertex) + offset ; o.uv_tex1 = TRANSFORM_TEX(v.uv_tex1, _MainTex); return o; } fixed4 frag (v2f i) : SV_Target { return tex2D(_MainTex, i.uv_tex1) * _Color; } ENDCG } } It worked perfectly in Unity 5.5, but I just updated to 5.6 and now I get different results in Editor vs Android device. On the Y axis, I get opposite translations: Editor: ![alt text][1] Huawei P9 plus: ![alt text][2] Does anyone know what could be the issue here? Thank you in advance! [1]: /storage/temp/95003-1.png [2]: /storage/temp/95005-3.png

how to approach instanceID in shader?

$
0
0
SkinnedMeshRenderer isn't batched in Unity, so I will implement GPU Skinning with GPU Instancing. I tried to access the instance ID (like SV_InstanceID), but these are wrapped in preprocessor macros, so I searched the built-in shaders (version 5.6.1f) and found this comment: // basic instancing setups // - UNITY_VERTEX_INPUT_INSTANCE_ID Declare instance ID field in vertex shader input / output struct. // - UNITY_GET_INSTANCE_ID (Internal) Get the instance ID from input struct. The "Internal" keyword leaves me frustrated and unsure. I have just one question: ***Can I use "instanceID" in shader code?***

Is it possible to render to a texture via RWTexture2D without using Graphics.blit

$
0
0
I would like to save the output of a fragment shader to a texture, so that I can attempt to reuse the data in later shaders. I originally used Graphics.Blit, but the problem I found is that it submits a quad to the shader, and I need to render a mesh. My current fallback is to use a RWStructuredBuffer to store the colour value. Unfortunately, I have to do this at a per-vertex basis, as I wouldn't know how many fragments there will be. Therefore, if I wanted to reuse the shaded data, it would have lost some accuracy. So, I would like to know if there is a way to define a rendertexture as the shader target, without using Graphics.blit. ![alt text][1] This is my current shader. ![alt text][2] And this is the code initialising and assigning the buffers. [1]: /storage/temp/96691-buffer-shader.png [2]: /storage/temp/96692-shadedbuffer-setup.png

How to reuse HLSL shader code?

$
0
0
Hello! I needed a set of shaders that curve based on the distance from camera. Basically, I took some of the default HLSL shaders Unity provides and changed the code in the vertex shader to adjust the position of the vertices. It works great, but I would like to know if there is a way to centralize the code in the vertex shader, as it is the same in all cases, and just pass that code throughout all shaders I need, because only the fragment shader is different. This is because if I need to change anything in the vertex shader function, I need to change it in 6 different places. Thank you!

(Shader) How to find the coordinate of a given color inside a ramp texture?

$
0
0
I'm currently working on a palette swap feature for my game's sprites in Unity. I know that this is normally achieved by using a grayscale texture and then using a ramp texture to replace the colors, or by mapping each base color to a given channel value and then doing a lookup. BUT, since every sprite is hand drawn and painted, there are like a gazillion different RGB values, and applying those techniques is a little troublesome. So, what I want to do is write a shader that does the following: -Get the RGB value of the pixel being processed -Find the coordinate of that value in a palette texture ([n-colors]x2) (This is the part that I have no idea how to accomplish) -With its coordinate, get the swap color that would be one row beneath the original color inside the palette texture -Apply the new color to the sprite Basically, this ![alt text][1] [1]: https://i.stack.imgur.com/AHNCm.png What I need to know is how to find the color inside the palette texture, **basically a reverse tex2D(_Texture,coord)** Is there any way I could achieve this? If so, how efficient is it? Is there any other way?

Set list in shader (uniform)

$
0
0
Is there a way to set a list for a uniform? Something similar to "material.SetFloatArray" but with a list? Or is the restriction only for arrays? Also, is it possible to change the array during runtime in shaders?

Where Is UNITY_POSITION(pos) Defined?

$
0
0
Hi All, I was hoping to copy and modify the following fragment shader in UnityStandardCoreForwardSimple.cginc: half4 fragForwardBaseSimpleInternal (VertexOutputBaseSimple i) { UNITY_APPLY_DITHER_CROSSFADE(i.pos.xy); FragmentCommonData s = FragmentSetupSimple(i); UnityLight mainLight = MainLightSimple(i, s); #if !defined(LIGHTMAP_ON) && defined(_NORMALMAP) half ndotl = saturate(dot(s.tangentSpaceNormal, i.tangentSpaceLightDir)); #else half ndotl = saturate(dot(s.normalWorld, mainLight.dir)); #endif //we can't have worldpos here (not enough interpolator on SM 2.0) so no shadow fade in that case. half shadowMaskAttenuation = UnitySampleBakedOcclusion(i.ambientOrLightmapUV, 0); half realtimeShadowAttenuation = SHADOW_ATTENUATION(i); half atten = UnityMixRealtimeAndBakedShadows(realtimeShadowAttenuation, shadowMaskAttenuation, 0); half occlusion = Occlusion(i.tex.xy); half rl = dot(REFLECTVEC_FOR_SPECULAR(i, s), LightDirForSpecular(i, mainLight)); UnityGI gi = FragmentGI (s, occlusion, i.ambientOrLightmapUV, atten, mainLight); half3 attenuatedLightColor = gi.light.color * ndotl; half3 c = BRDF3_Indirect(s.diffColor, s.specColor, gi.indirect, PerVertexGrazingTerm(i, s), PerVertexFresnelTerm(i)); c += BRDF3DirectSimple(s.diffColor, s.specColor, s.smoothness, rl) * attenuatedLightColor; c += Emission(i.tex.xy); UNITY_APPLY_FOG(i.fogCoord, c); return OutputForward (half4(c, 1), s.alpha); } But I get an error stating it doesn't recognize `VertexOutputBaseSimple`. If I include that it reads, "unrecognized identifier `UNITY_POSITION(pos)`". If anyone can point me in the correct direction I would greatly appreciate it! Thanks,

How to get picked worldposition to clipspace, in fragment shader? (Image effect shader)

$
0
0
So this is all happening in an image effect shader. I have converted each pixel from clip space to world space, applied an offset to this in world space, and now I need the new world space coordinates back in clip space coords. I have run out of things to try; I always get a result close to what I'd expect, but never exact.

How do I blur a Cubemap and Texture in CG/HLSL Unity 5

$
0
0
How can I blur (Gaussian) cubemaps and textures in CG/HLSL in Unity 5? Thanks in advance.

Vertex and fragment shader advice

$
0
0
Evening, I'm learning about fragment and vertex shaders (beginner) and I kept scratching my head trying to get it right. My goal is to make a shader that is tileable (for indoor tiles) and customizable grouts (color and width). With the help of the Internet, I managed to learn some basic stuff about shaders and made minor tweaks to it. I used a ready made grid shader and tweaked the grid to not tile according to world space. Here's the code: Shader "Test/Unlit Grid" { Properties { //GRID PROPERTIES _GridColour ("Grid Colour", color) = (1, 1, 1, 1) _BaseColour ("Base Colour", color) = (1, 1, 1, 0) _GridSpacing ("Grid Spacing", float) = 0.1 _LineThickness ("Line Thickness", float) = 1 //2D TEXTURES _MainTex ("Base Texture", 2D) = "white"{} } SubShader { Tags { "RenderType"="Opaque" "Queue"="Transparent"} LOD 100 Blend SrcAlpha OneMinusSrcAlpha ZWrite Off Pass { CGPROGRAM #pragma vertex vert #pragma fragment frag #pragma target 3.0 #include "UnityCG.cginc" struct appdata { float4 vertex : POSITION; float2 uv : TEXCOORD0; }; struct v2f { float4 vertex : SV_POSITION; float2 uv : TEXCOORD0; }; fixed4 _GridColour; fixed4 _BaseColour; float _GridSpacing; float _LineThickness; sampler2D _MainTex; float4 _MainTex_ST; sampler2D _NormalTex; //VERT FUNCTION v2f vert (appdata v) { v2f o; o.vertex = UnityObjectToClipPos(v.vertex); //o.uv = mul(unity_ObjectToWorld, v.vertex).xz / _GridSpacing; o.uv = TRANSFORM_TEX(v.uv, _MainTex)*_GridSpacing; return o; } //FRAG FUNCTION fixed4 frag (v2f i) : SV_Target { float2 wrapped = frac(i.uv); float2 range = abs(wrapped); float2 speeds; // Euclidean norm gives slightly more even thickness on diagonals float4 deltas = float4(ddx(i.uv), ddy(i.uv)); speeds = sqrt(float2( dot(deltas.xz, deltas.xz), dot(deltas.yw, deltas.yw) )); // Cheaper Manhattan norm in fwidth slightly exaggerates thickness of diagonals //speeds = fwidth(i.uv)/2; fixed4 col = tex2D(_MainTex, i.uv); float2 pixelRange = range/speeds; float lineWeight = 
saturate(min(pixelRange.x, pixelRange.y) /_LineThickness); //float lineWeight = min(pixelRange.x, pixelRange.y) - _LineThickness; return lerp(_GridColour, col, lineWeight); } ENDCG } } } Sample image: ![alt text][1] The question is, am I able to mix a Surface Shader below the fragment shader and still retain the grouts from the fragment shader itself? [1]: /storage/temp/100930-capture.jpg

Triangulate image in realtime

$
0
0
Hey everybody. I'm trying to create a shader that triangulates a texture. As a first step, the realtime part is not that important. This is my plan to achieve it (all this code is in the fragment shader part): 1. Apply Harris Corner detection to find interesting points / corners in the image. 2. Use this list of points and apply Delaunay triangulation to create the triangles and color them in different colors. Can I achieve this with a shader, or is it the wrong way? This is the effect I'm trying to get ![alt text][1] Thanks for your help. [1]: /storage/temp/100949-delaunay-effect.jpg

Add additional UV channel for Blit

$
0
0
Hello, I wonder how I can send TEXCOORD1 vertex information to an image effect shader when I'm doing Graphics.Blit? I've browsed through the Internet; however, I cannot find anything useful. So if anyone can tell me how to add an additional UV channel, it would be much appreciated.

Simple Unlit HLSL Shader with Rim Lighting?

$
0
0
Hi! I'm looking for a way to create Rim Lighting effect on a simple Unlit shader. Everywhere I looked people are using the surface shaders and I'm not familiar with them yet. Here is my current shader. Is simple Unlit that uses vertex colors. struct appdata { float4 vertex : POSITION; fixed4 color : COLOR; }; struct v2f { float4 pos : SV_POSITION; fixed4 color : TEXCOORD2; }; v2f vert (appdata v) { v2f o; o.pos = UnityObjectToClipPos(v.vertex); o.color = v.color; UNITY_TRANSFER_FOG(o, o.pos); return o; } half4 frag (v2f i) : COLOR { return i.color; } I would like to have this effect: ![alt text][1] Where could I find an example for this using standard HLSL shaders instead of the Surf ones. Thank you! [1]: http://kylehalladay.com/images/post_images/2014-02-23/FresnelRim.png

"Floor" function produces artifacts in shader

$
0
0
Hello everyone, here is the output of the simple shader **tex2D( myTex , floor(i.uv*3)/3);** I need to do this to get a coherent noise from sampling an image. How can I avoid the flickering artifacts that appear? They appear in the middle, at points where i.uv.x/3=1, 2, 3 and i.uv.y/3=1, 2, 3 ![alt text][1] [1]: /storage/temp/102601-floor.png

Accessing and writing depth on a render texture in compute shader

$
0
0
Hey, so this might seem to be a stupid question, but so far after a few days of trying to find this I have found only people who don't know how to do this or people who assume you know how to do this, but no actual explanation. I am writing a compute shader which I am giving a rendertexture. So far changing the color of the rendertexture works fine. I declare it as a RWTexture2D and set the color for each pixel as a float4. My problem is that I also need to write to the depth buffer of my render texture too. How do I do this? Is there a way to read and write to the depth part of the rendertexture I am giving to my compute shader?
Viewing all 206 articles
Browse latest View live


<script src="https://jsc.adskeeper.com/r/s/rssing.com.1596347.js" async> </script>