ATTENTION: default value of option allow_glsl_extension_directive_midshader overridden by environment.
ATTENTION: default value of option shader_precompile overridden by environment.

GLSL IR for native fragment shader 1:
( (declare (location=0 shader_in ) vec4 gl_FragCoord) (declare (location=32 shader_in ) vec4 v) (declare (location=33 shader_in ) vec4 v) (declare (location=34 shader_in ) vec4 v) (declare (location=35 shader_in ) vec4 v) (declare (location=37 shader_in ) vec4 v) (declare (location=0 writeonly restrict shader_storage ) (array uint 0) u7) (declare (location=0 writeonly restrict shader_storage ) (array uvec4 0) u5) (declare (location=0 readonly restrict shader_storage ) (array uint 0) u4) (declare (location=0 uniform ) cb_ps11 idx_uniforms11_ps) (declare (location=0 uniform ) cb_ps13 idx_uniforms13_ps) (declare (location=0 uniform ) cb_ps12 idx_uniforms12_ps) (declare (location=0 uniform ) cb_ps6 idx_uniforms6_ps) (declare (location=0 uniform ) cb_ps0 idx_uniforms0_ps) (declare (location=4 shader_out ) vec4 o0) (declare (location=5 shader_out ) vec4 o1) (declare (location=6 shader_out ) vec4 o2) (declare () vec4 r0) (declare () vec4 r1) (declare () vec4 r2) (declare () vec4 r3) (declare () vec4 r4) (declare () vec4 r5) (declare () vec4 r6) (declare (location=8 uniform ) sampler2D resourceSamplerPair_0_ps) (declare (location=9 uniform ) sampler2D resourceSamplerPair_1_ps) (declare (location=10 uniform ) sampler2D resourceSamplerPair_2_ps) (declare (location=11 uniform ) sampler2D resourceSamplerPair_3_ps) ( function main (signature void (parameters ) ( (declare (temporary ) vec4 compiler_temp) (declare (temporary ) uint compiler_temp@2) (assign (x) (var_ref compiler_temp@2) (constant uint (0)) ) (assign (xyzw) (var_ref compiler_temp) (expression vec4 ubo_load (constant uint (3)) (expression uint + (var_ref compiler_temp@2) (constant uint (96)) ) ) ) (assign (xy) (var_ref r0) (expression vec2 * (swiz xy (var_ref v) )(swiz xy (var_ref compiler_temp) )) ) (assign (x) (var_ref r0) (swiz w (tex vec4 (var_ref resourceSamplerPair_0_ps) (swiz xy (var_ref r0) ) 0 1 () ))) (assign (x) (var_ref r0) (expression float min (swiz x (var_ref r0) )(constant float (1.000000)) ) ) (assign (yz) (var_ref r0) (constant vec2 (1.000000 0.000000)) ) (loop ( (assign (w) (var_ref r0) (expression float i2f (expression int bitcast_f2i (swiz z (var_ref r0) )) ) ) (declare (temporary ) int compiler_temp@3) (declare (temporary ) vec4 compiler_temp@4) (declare (temporary ) uint compiler_temp@5) (assign (x) (var_ref compiler_temp@5) (constant uint (0)) ) (assign (xyzw) (var_ref compiler_temp@4) (expression vec4 ubo_load (constant uint (4)) (expression uint + (var_ref compiler_temp@5) (constant uint (2176)) ) ) ) (if (expression bool >= (swiz w (var_ref r0) )(swiz z (var_ref compiler_temp@4) )) ( (assign (x) (var_ref compiler_temp@3) (constant int (-1)) ) ) ( (assign (x) (var_ref compiler_temp@3) (constant int (0)) ) )) (assign (w) (var_ref r0) (expression float bitcast_i2f (var_ref compiler_temp@3) ) ) (declare (temporary ) uint compiler_temp@6) (assign (x) (var_ref compiler_temp@6) (expression uint bitcast_f2u (swiz w (var_ref r0) )) ) (if (expression bool != (var_ref compiler_temp@6) (constant uint (0)) ) ( break ) ()) (declare (temporary ) vec4 compiler_temp@7) (declare (temporary ) uint compiler_temp@8) (assign (x) (var_ref compiler_temp@8) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (32)) (expression int bitcast_f2i (swiz z (var_ref r0)
)) ) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref compiler_temp@7) (expression vec4 ubo_load (constant uint (4)) (expression uint + (var_ref compiler_temp@8) (constant uint (0)) ) ) ) (declare (temporary ) vec4 compiler_temp@9) (declare (temporary ) uint compiler_temp@10) (assign (x) (var_ref compiler_temp@10) (constant uint (0)) ) (assign (xyzw) (var_ref compiler_temp@9) (expression vec4 ubo_load (constant uint (4)) (expression uint + (var_ref compiler_temp@10) (constant uint (2064)) ) ) ) (assign (xyz) (var_ref r1) (expression vec3 * (swiz yyy (var_ref compiler_temp@7) )(swiz xyz (var_ref compiler_temp@9) )) ) (declare (temporary ) vec4 compiler_temp@11) (declare (temporary ) uint compiler_temp@12) (assign (x) (var_ref compiler_temp@12) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (32)) (expression int bitcast_f2i (swiz z (var_ref r0) )) ) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref compiler_temp@11) (expression vec4 ubo_load (constant uint (4)) (expression uint + (var_ref compiler_temp@12) (constant uint (0)) ) ) ) (declare (temporary ) vec4 compiler_temp@13) (declare (temporary ) uint compiler_temp@14) (assign (x) (var_ref compiler_temp@14) (constant uint (0)) ) (assign (xyzw) (var_ref compiler_temp@13) (expression vec4 ubo_load (constant uint (4)) (expression uint + (var_ref compiler_temp@14) (constant uint (2048)) ) ) ) (assign (xyz) (var_ref r1) (expression vec3 fma (swiz xxx (var_ref compiler_temp@11) )(swiz xyz (var_ref compiler_temp@13) )(swiz xyz (var_ref r1) )) ) (declare (temporary ) vec4 compiler_temp@15) (declare (temporary ) uint compiler_temp@16) (assign (x) (var_ref compiler_temp@16) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (32)) (expression int bitcast_f2i (swiz z (var_ref r0) )) ) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref compiler_temp@15) (expression vec4 ubo_load (constant uint (4)) (expression uint + (var_ref compiler_temp@16) (constant uint (0)) ) ) ) (declare (temporary ) vec4 compiler_temp@17) (declare (temporary ) uint compiler_temp@18) (assign (x) (var_ref compiler_temp@18) (constant uint (0)) ) (assign (xyzw) (var_ref compiler_temp@17) (expression vec4 ubo_load (constant uint (4)) (expression uint + (var_ref compiler_temp@18) (constant uint (2080)) ) ) ) (assign (xyz) (var_ref r1) (expression vec3 fma (swiz zzz (var_ref compiler_temp@15) )(swiz xyz (var_ref compiler_temp@17) )(swiz xyz (var_ref r1) )) ) (declare (temporary ) vec4 compiler_temp@19) (declare (temporary ) uint compiler_temp@20) (assign (x) (var_ref compiler_temp@20) (constant uint (0)) ) (assign (xyzw) (var_ref compiler_temp@19) (expression vec4 ubo_load (constant uint (4)) (expression uint + (var_ref compiler_temp@20) (constant uint (2096)) ) ) ) (assign (xyz) (var_ref r1) (expression vec3 + (swiz xyz (var_ref r1) )(swiz xyz (var_ref compiler_temp@19) )) ) (assign (xyz) (var_ref r1) (expression vec3 + (expression vec3 neg (swiz xyz (var_ref r1) )) (swiz xyz (var_ref v@21) )) ) (declare (temporary ) vec4 compiler_temp@22) (declare (temporary ) uint compiler_temp@23) (assign (x) (var_ref compiler_temp@23) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (expression int + (constant int (32)) (expression int bitcast_f2i (swiz z (var_ref r0) )) ) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref compiler_temp@22) (expression vec4 ubo_load (constant uint (4)) (expression uint + (var_ref compiler_temp@23) 
(constant uint (0)) ) ) ) (assign (xyz) (var_ref r1) (expression vec3 * (swiz xyz (var_ref r1) )(expression vec3 rcp (expression vec3 abs (swiz www (var_ref compiler_temp@22) )) ) ) ) (assign (w) (var_ref r0) (expression float dot (swiz xyz (var_ref r1) )(swiz xyz (var_ref r1) )) ) (assign (w) (var_ref r0) (expression float min (swiz w (var_ref r0) )(constant float (1.000000)) ) ) (assign (y) (var_ref r0) (expression float min (swiz w (var_ref r0) )(swiz y (var_ref r0) )) ) (assign (z) (var_ref r0) (expression float bitcast_i2f (expression int + (expression int bitcast_f2i (swiz z (var_ref r0) )) (constant int (1)) ) ) ) )) (assign (zw) (var_ref r0) (constant vec2 (0.000000 0.000000)) ) (loop ( (assign (x) (var_ref r1) (expression float i2f (expression int bitcast_f2i (swiz w (var_ref r0) )) ) ) (declare (temporary ) int compiler_temp@24) (declare (temporary ) vec4 compiler_temp@25) (declare (temporary ) uint compiler_temp@26) (assign (x) (var_ref compiler_temp@26) (constant uint (0)) ) (assign (xyzw) (var_ref compiler_temp@25) (expression vec4 ubo_load (constant uint (4)) (expression uint + (var_ref compiler_temp@26) (constant uint (2176)) ) ) ) (if (expression bool >= (swiz x (var_ref r1) )(swiz x (var_ref compiler_temp@25) )) ( (assign (x) (var_ref compiler_temp@24) (constant int (-1)) ) ) ( (assign (x) (var_ref compiler_temp@24) (constant int (0)) ) )) (assign (x) (var_ref r1) (expression float bitcast_i2f (var_ref compiler_temp@24) ) ) (declare (temporary ) uint compiler_temp@27) (assign (x) (var_ref compiler_temp@27) (expression uint bitcast_f2u (swiz x (var_ref r1) )) ) (if (expression bool != (var_ref compiler_temp@27) (constant uint (0)) ) ( break ) ()) (declare (temporary ) int compiler_temp@28) (assign (x) (var_ref compiler_temp@28) (expression int bitcast_f2i (swiz w (var_ref r0) )) ) (declare (temporary ) vec4 compiler_temp@29) (declare (temporary ) uint compiler_temp@30) (assign (x) (var_ref compiler_temp@30) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref compiler_temp@28) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref compiler_temp@29) (expression vec4 ubo_load (constant uint (4)) (expression uint + (var_ref compiler_temp@30) (constant uint (0)) ) ) ) (declare (temporary ) vec4 compiler_temp@31) (declare (temporary ) uint compiler_temp@32) (assign (x) (var_ref compiler_temp@32) (constant uint (0)) ) (assign (xyzw) (var_ref compiler_temp@31) (expression vec4 ubo_load (constant uint (4)) (expression uint + (var_ref compiler_temp@32) (constant uint (2064)) ) ) ) (assign (xyz) (var_ref r1) (expression vec3 * (swiz yyy (var_ref compiler_temp@29) )(swiz xyz (var_ref compiler_temp@31) )) ) (declare (temporary ) int compiler_temp@33) (assign (x) (var_ref compiler_temp@33) (expression int bitcast_f2i (swiz w (var_ref r0) )) ) (declare (temporary ) vec4 compiler_temp@34) (declare (temporary ) uint compiler_temp@35) (assign (x) (var_ref compiler_temp@35) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref compiler_temp@33) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref compiler_temp@34) (expression vec4 ubo_load (constant uint (4)) (expression uint + (var_ref compiler_temp@35) (constant uint (0)) ) ) ) (declare (temporary ) vec4 compiler_temp@36) (declare (temporary ) uint compiler_temp@37) (assign (x) (var_ref compiler_temp@37) (constant uint (0)) ) (assign (xyzw) (var_ref compiler_temp@36) (expression vec4 ubo_load (constant uint (4)) (expression uint + (var_ref compiler_temp@37) (constant uint 
(2048)) ) ) ) (assign (xyz) (var_ref r1) (expression vec3 fma (swiz xxx (var_ref compiler_temp@34) )(swiz xyz (var_ref compiler_temp@36) )(swiz xyz (var_ref r1) )) ) (declare (temporary ) int compiler_temp@38) (assign (x) (var_ref compiler_temp@38) (expression int bitcast_f2i (swiz w (var_ref r0) )) ) (declare (temporary ) vec4 compiler_temp@39) (declare (temporary ) uint compiler_temp@40) (assign (x) (var_ref compiler_temp@40) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref compiler_temp@38) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref compiler_temp@39) (expression vec4 ubo_load (constant uint (4)) (expression uint + (var_ref compiler_temp@40) (constant uint (0)) ) ) ) (declare (temporary ) vec4 compiler_temp@41) (declare (temporary ) uint compiler_temp@42) (assign (x) (var_ref compiler_temp@42) (constant uint (0)) ) (assign (xyzw) (var_ref compiler_temp@41) (expression vec4 ubo_load (constant uint (4)) (expression uint + (var_ref compiler_temp@42) (constant uint (2080)) ) ) ) (assign (xyz) (var_ref r1) (expression vec3 fma (swiz zzz (var_ref compiler_temp@39) )(swiz xyz (var_ref compiler_temp@41) )(swiz xyz (var_ref r1) )) ) (declare (temporary ) vec4 compiler_temp@43) (declare (temporary ) uint compiler_temp@44) (assign (x) (var_ref compiler_temp@44) (constant uint (0)) ) (assign (xyzw) (var_ref compiler_temp@43) (expression vec4 ubo_load (constant uint (4)) (expression uint + (var_ref compiler_temp@44) (constant uint (2096)) ) ) ) (assign (xyz) (var_ref r1) (expression vec3 + (swiz xyz (var_ref r1) )(swiz xyz (var_ref compiler_temp@43) )) ) (assign (xyz) (var_ref r1) (expression vec3 + (expression vec3 neg (swiz xyz (var_ref r1) )) (swiz xyz (var_ref v@21) )) ) (declare (temporary ) int compiler_temp@45) (assign (x) (var_ref compiler_temp@45) (expression int bitcast_f2i (swiz w (var_ref r0) )) ) (declare (temporary ) vec4 compiler_temp@46) (declare (temporary ) uint compiler_temp@47) (assign (x) (var_ref compiler_temp@47) (expression uint + (constant uint (0)) (expression uint * (expression uint i2u (var_ref compiler_temp@45) ) (constant uint (16)) ) ) ) (assign (xyzw) (var_ref compiler_temp@46) (expression vec4 ubo_load (constant uint (4)) (expression uint + (var_ref compiler_temp@47) (constant uint (0)) ) ) ) (assign (xyz) (var_ref r1) (expression vec3 * (swiz xyz (var_ref r1) )(expression vec3 rcp (swiz www (var_ref compiler_temp@46) )) ) ) (assign (x) (var_ref r1) (expression float dot (swiz xyz (var_ref r1) )(swiz xyz (var_ref r1) )) ) (assign (x) (var_ref r1) (expression float + (expression float neg (swiz x (var_ref r1) )) (constant float (1.000000)) ) ) (assign (x) (var_ref r1) (expression float max (swiz x (var_ref r1) )(constant float (0.000000)) ) ) (assign (x) (var_ref r1) (expression float dot (swiz xx (var_ref r1) )(swiz xx (var_ref r1) )) ) (assign (x) (var_ref r1) (expression float min (swiz x (var_ref r1) )(constant float (1.000000)) ) ) (assign (z) (var_ref r0) (expression float max (swiz z (var_ref r0) )(swiz x (var_ref r1) )) ) (assign (w) (var_ref r0) (expression float bitcast_i2f (expression int + (expression int bitcast_f2i (swiz w (var_ref r0) )) (constant int (1)) ) ) ) )) (assign (y) (var_ref r0) (expression float + (swiz y (var_ref r0) )(swiz z (var_ref r0) )) ) (assign (y) (var_ref r0) (expression float sat (expression float + (swiz y (var_ref r0) )(swiz w (var_ref v@48) )) ) ) (assign (y) (var_ref r0) (expression float + (swiz y (var_ref r0) )(constant float (-0.990000)) ) ) (declare (temporary ) int 
compiler_temp@49) (if (expression bool < (swiz y (var_ref r0) )(constant float (0.000000)) ) ( (assign (x) (var_ref compiler_temp@49) (constant int (-1)) ) ) ( (assign (x) (var_ref compiler_temp@49) (constant int (0)) ) )) (assign (y) (var_ref r0) (expression float bitcast_i2f (var_ref compiler_temp@49) ) ) (declare (temporary ) bool compiler_temp@50) (assign (x) (var_ref compiler_temp@50) (expression bool != (expression uint bitcast_f2u (swiz y (var_ref r0) )) (constant uint (0)) ) ) (assign (yz) (var_ref r0) (expression vec2 bitcast_u2f (expression uvec2 f2u (swiz xy (var_ref gl_FragCoord) )) ) ) (declare (temporary ) vec4 compiler_temp@51) (declare (temporary ) uint compiler_temp@52) (assign (x) (var_ref compiler_temp@52) (constant uint (0)) ) (assign (xyzw) (var_ref compiler_temp@51) (expression vec4 ubo_load (constant uint (0)) (expression uint + (var_ref compiler_temp@52) (constant uint (416)) ) ) ) (assign (w) (var_ref r0) (expression float bitcast_u2f (expression uint f2u (swiz z (var_ref compiler_temp@51) )) ) ) (assign (xy) (var_ref r1) (expression vec2 bitcast_u2f (expression uvec2 >> (expression uvec2 bitcast_f2u (swiz yz (var_ref r0) )) (constant uvec2 (1 1)) ) ) ) (assign (w) (var_ref r0) (expression float bitcast_u2f (expression uint >> (expression uint bitcast_f2u (swiz w (var_ref r0) )) (constant uint (1)) ) ) ) (assign (w) (var_ref r0) (expression float bitcast_i2f (expression int + (expression int * (expression int bitcast_f2i (swiz y (var_ref r1) )) (expression int bitcast_f2i (swiz w (var_ref r0) )) ) (expression int bitcast_f2i (swiz x (var_ref r1) )) ) ) ) (assign (w) (var_ref r0) (expression float bitcast_i2f (expression int << (expression int bitcast_f2i (swiz w (var_ref r0) )) (constant int (2)) ) ) ) (assign (x) (var_ref r1) (expression float bitcast_i2f (expression int << (expression int bitcast_f2i (swiz w (var_ref r0) )) (constant int (3)) ) ) ) (assign (x) (var_ref r1) (expression float bitcast_i2f (expression int bitfield_insert (expression int bitcast_f2i (swiz x (var_ref r1) )) (expression int bitcast_f2i (swiz y (var_ref r0) )) (constant int (3)) (constant int (1)) ) ) ) (assign (x) (var_ref r1) (expression float bitcast_i2f (expression int bitfield_insert (expression int bitcast_f2i (swiz x (var_ref r1) )) (expression int bitcast_f2i (swiz z (var_ref r0) )) (constant int (4)) (constant int (1)) ) ) ) (declare (temporary ) uvec2 compiler_temp@53) (declare (temporary ) uint compiler_temp@54) (declare (temporary ) uint compiler_temp@55) (assign (x) (var_ref compiler_temp@55) (expression uint + (constant uint (0)) (expression uint * (expression uint / (expression uint bitcast_f2u (swiz x (var_ref r1) )) (constant uint (4)) ) (constant uint (4)) ) ) ) (declare (temporary ) uint compiler_temp@56) (call __intrinsic_load_ssbo (var_ref compiler_temp@56) ((constant uint (2)) (expression uint + (var_ref compiler_temp@55) (constant uint (0)) ) (constant uint (2)) )) (assign (x) (var_ref compiler_temp@54) (var_ref compiler_temp@56) ) (assign (x) (var_ref compiler_temp@53) (var_ref compiler_temp@54) ) (declare (temporary ) uint compiler_temp@57) (declare (temporary ) uint compiler_temp@58) (assign (x) (var_ref compiler_temp@58) (expression uint + (constant uint (0)) (expression uint * (expression uint + (expression uint / (expression uint bitcast_f2u (swiz x (var_ref r1) )) (constant uint (4)) ) (constant uint (1)) ) (constant uint (4)) ) ) ) (declare (temporary ) uint compiler_temp@59) (call __intrinsic_load_ssbo (var_ref compiler_temp@59) ((constant uint (2)) 
(expression uint + (var_ref compiler_temp@58) (constant uint (0)) ) (constant uint (2)) )) (assign (x) (var_ref compiler_temp@57) (var_ref compiler_temp@59) ) (assign (y) (var_ref compiler_temp@53) (var_ref compiler_temp@57) ) (assign (yz) (var_ref r1) (expression vec2 bitcast_u2f (var_ref compiler_temp@53) ) ) (assign (yz) (var_ref r1) (expression vec2 bitcast_i2f (expression ivec2 csel (expression bvec2 == (swiz yz (var_ref r1) )(swiz zz (var_ref gl_FragCoord) )) (constant ivec2 (-1 -1)) (constant ivec2 (0 0)) ) ) ) (assign (z) (var_ref r1) (expression float bitcast_u2f (expression uint csel (expression bool i2b (expression int u2i (expression uint bitcast_f2u (swiz z (var_ref r1) )) ) ) (constant uint (1065353216)) (constant uint (3212836864)) ) ) ) (assign (y) (var_ref r1) (expression float bitcast_u2f (expression uint csel (expression bool i2b (expression int u2i (expression uint bitcast_f2u (swiz y (var_ref r1) )) ) ) (constant uint (0)) (expression uint bitcast_f2u (swiz z (var_ref r1) )) ) ) ) (declare (temporary ) int compiler_temp@60) (if (expression bool >= (swiz y (var_ref r1) )(constant float (0.000000)) ) ( (assign (x) (var_ref compiler_temp@60) (constant int (-1)) ) ) ( (assign (x) (var_ref compiler_temp@60) (constant int (0)) ) )) (assign (z) (var_ref r1) (expression float bitcast_i2f (var_ref compiler_temp@60) ) ) (declare (temporary ) int compiler_temp@61) (if (expression bool < (constant float (0.020000)) (swiz x (var_ref r0) )) ( (assign (x) (var_ref compiler_temp@61) (constant int (-1)) ) ) ( (assign (x) (var_ref compiler_temp@61) (constant int (0)) ) )) (assign (w) (var_ref r1) (expression float bitcast_i2f (var_ref compiler_temp@61) ) ) (assign (z) (var_ref r1) (expression float bitcast_u2f (expression uint & (expression uint bitcast_f2u (swiz w (var_ref r1) )) (expression uint bitcast_f2u (swiz z (var_ref r1) )) ) ) ) (declare (temporary ) uint compiler_temp@62) (assign (x) (var_ref compiler_temp@62) (expression uint bitcast_f2u (swiz z (var_ref r1) )) ) (if (expression bool != (var_ref compiler_temp@62) (constant uint (0)) ) ( (declare (temporary ) vec4 compiler_temp@63) (declare (temporary ) uint compiler_temp@64) (assign (x) (var_ref compiler_temp@64) (constant uint (0)) ) (assign (xyzw) (var_ref compiler_temp@63) (expression vec4 ubo_load (constant uint (3)) (expression uint + (var_ref compiler_temp@64) (constant uint (80)) ) ) ) (assign (xyzw) (var_ref r2) (expression vec4 * (swiz xyxy (var_ref v) )(swiz zwxy (var_ref compiler_temp@63) )) ) (assign (zw) (var_ref r1) (swiz xy (tex vec4 (var_ref resourceSamplerPair_1_ps) (swiz xy (var_ref r2) ) 0 1 () ))) (declare (temporary ) vec4 compiler_temp@65) (declare (temporary ) uint compiler_temp@66) (assign (x) (var_ref compiler_temp@66) (constant uint (0)) ) (assign (xyzw) (var_ref compiler_temp@65) (expression vec4 ubo_load (constant uint (3)) (expression uint + (var_ref compiler_temp@66) (constant uint (64)) ) ) ) (assign (zw) (var_ref r1) (expression vec2 * (swiz zw (var_ref r1) )(swiz xy (var_ref compiler_temp@65) )) ) (assign (xyz) (var_ref r2) (swiz xyz (tex vec4 (var_ref resourceSamplerPair_2_ps) (swiz zw (var_ref r2) ) 0 1 () ))) (declare (temporary ) vec4 compiler_temp@67) (declare (temporary ) uint compiler_temp@68) (assign (x) (var_ref compiler_temp@68) (constant uint (0)) ) (assign (xyzw) (var_ref compiler_temp@67) (expression vec4 ubo_load (constant uint (3)) (expression uint + (var_ref compiler_temp@68) (constant uint (16)) ) ) ) (assign (xyz) (var_ref r3) (expression vec3 * (swiz www (var_ref r1) )(swiz 
xyz (var_ref compiler_temp@67) )) ) (declare (temporary ) vec4 compiler_temp@69) (declare (temporary ) uint compiler_temp@70) (assign (x) (var_ref compiler_temp@70) (constant uint (0)) ) (assign (xyzw) (var_ref compiler_temp@69) (expression vec4 ubo_load (constant uint (3)) (expression uint + (var_ref compiler_temp@70) (constant uint (0)) ) ) ) (assign (xyz) (var_ref r2) (expression vec3 fma (swiz xyz (var_ref r2) )(swiz xyz (var_ref compiler_temp@69) )(swiz xyz (var_ref r3) )) ) (declare (temporary ) vec4 compiler_temp@71) (declare (temporary ) uint compiler_temp@72) (assign (x) (var_ref compiler_temp@72) (constant uint (0)) ) (assign (xyzw) (var_ref compiler_temp@71) (expression vec4 ubo_load (constant uint (3)) (expression uint + (var_ref compiler_temp@72) (constant uint (32)) ) ) ) (assign (xyz) (var_ref r2) (expression vec3 sat (expression vec3 fma (swiz zzz (var_ref r1) )(swiz xyz (var_ref compiler_temp@71) )(swiz xyz (var_ref r2) )) ) ) (declare (temporary ) vec4 compiler_temp@73) (declare (temporary ) uint compiler_temp@74) (assign (x) (var_ref compiler_temp@74) (constant uint (0)) ) (assign (xyzw) (var_ref compiler_temp@73) (expression vec4 ubo_load (constant uint (3)) (expression uint + (var_ref compiler_temp@74) (constant uint (96)) ) ) ) (assign (zw) (var_ref r1) (expression vec2 * (swiz xy (var_ref v) )(swiz zw (var_ref compiler_temp@73) )) ) (assign (z) (var_ref r1) (swiz x (tex vec4 (var_ref resourceSamplerPair_3_ps) (swiz zw (var_ref r1) ) 0 1 () ))) (assign (w) (var_ref r1) (expression float dot (swiz xyz (var_ref v@21) )(swiz xyz (var_ref v@21) )) ) (assign (w) (var_ref r1) (expression float rsq (swiz w (var_ref r1) )) ) (assign (xyz) (var_ref r3) (expression vec3 * (swiz www (var_ref r1) )(swiz xyz (var_ref v@21) )) ) (assign (w) (var_ref r1) (expression float dot (swiz xyz (var_ref v@75) )(swiz xyz (var_ref v@75) )) ) (assign (w) (var_ref r1) (expression float rsq (swiz w (var_ref r1) )) ) (assign (xyz) (var_ref r4) (expression vec3 * (swiz www (var_ref r1) )(swiz xyz (var_ref v@75) )) ) (declare (temporary ) vec4 compiler_temp@76) (declare (temporary ) uint compiler_temp@77) (assign (x) (var_ref compiler_temp@77) (constant uint (0)) ) (assign (xyzw) (var_ref compiler_temp@76) (expression vec4 ubo_load (constant uint (3)) (expression uint + (var_ref compiler_temp@77) (constant uint (112)) ) ) ) (assign (z) (var_ref r1) (expression float sat (expression float * (swiz z (var_ref r1) )(swiz w (var_ref compiler_temp@76) )) ) ) (assign (w) (var_ref r1) (expression float + (expression float abs (swiz y (var_ref v@78) )) (expression float abs (swiz x (var_ref v@78) )) ) ) (assign (w) (var_ref r1) (expression float + (swiz w (var_ref r1) )(expression float abs (swiz z (var_ref v@78) )) ) ) (assign (w) (var_ref r1) (expression float rcp (swiz w (var_ref r1) )) ) (assign (xy) (var_ref r5) (expression vec2 * (swiz ww (var_ref r1) )(expression vec2 neg (swiz xy (var_ref v@78) )) ) ) (declare (temporary ) int compiler_temp@79) (if (expression bool >= (constant float (0.000000)) (expression float neg (swiz z (var_ref v@78) )) ) ( (assign (x) (var_ref compiler_temp@79) (constant int (-1)) ) ) ( (assign (x) (var_ref compiler_temp@79) (constant int (0)) ) )) (assign (w) (var_ref r1) (expression float bitcast_i2f (var_ref compiler_temp@79) ) ) (assign (zw) (var_ref r5) (expression vec2 + (expression vec2 neg (expression vec2 abs (swiz yx (var_ref r5) )) ) (constant vec2 (1.000000 1.000000)) ) ) (assign (xy) (var_ref r6) (expression vec2 bitcast_i2f (expression ivec2 csel (expression 
bvec2 >= (swiz xy (var_ref r5) )(constant vec2 (0.000000 0.000000)) ) (constant ivec2 (-1 -1)) (constant ivec2 (0 0)) ) ) ) (assign (xy) (var_ref r6) (expression vec2 bitcast_u2f (expression uvec2 csel (expression bvec2 i2b (expression ivec2 u2i (expression uvec2 bitcast_f2u (swiz xy (var_ref r6) )) ) ) (constant uvec2 (1065353216 1065353216)) (constant uvec2 (3212836864 3212836864)) ) ) ) (assign (zw) (var_ref r5) (expression vec2 * (swiz zw (var_ref r5) )(swiz xy (var_ref r6) )) ) (assign (xy) (var_ref r5) (expression vec2 bitcast_u2f (expression uvec2 csel (expression bvec2 i2b (expression ivec2 u2i (expression uvec2 bitcast_f2u (swiz ww (var_ref r1) )) ) ) (expression uvec2 bitcast_f2u (swiz zw (var_ref r5) )) (expression uvec2 bitcast_f2u (swiz xy (var_ref r5) )) ) ) ) (assign (w) (var_ref r1) (expression float dot (expression vec3 neg (swiz xyz (var_ref v@78) )) (swiz xyz (var_ref r3) )) ) (assign (xyz) (var_ref r3) (expression vec3 fma (expression vec3 neg (swiz www (var_ref r1) )) (expression vec3 neg (swiz xyz (var_ref v@78) )) (swiz xyz (var_ref r3) )) ) (assign (w) (var_ref r1) (expression float dot (swiz xyz (var_ref r3) )(swiz xyz (var_ref r3) )) ) (assign (w) (var_ref r1) (expression float rsq (swiz w (var_ref r1) )) ) (assign (xyz) (var_ref r3) (expression vec3 * (swiz www (var_ref r1) )(swiz xyz (var_ref r3) )) ) (assign (xyz) (var_ref r6) (expression vec3 * (swiz yzx (var_ref r3) )(expression vec3 neg (swiz zxy (var_ref v@78) )) ) ) (assign (xyz) (var_ref r6) (expression vec3 fma (expression vec3 neg (swiz yzx (var_ref v@78) )) (swiz zxy (var_ref r3) )(expression vec3 neg (swiz xyz (var_ref r6) )) ) ) (assign (w) (var_ref r1) (expression float dot (swiz xyz (var_ref r4) )(swiz xyz (var_ref r3) )) ) (assign (w) (var_ref r2) (expression float dot (swiz xyz (var_ref r4) )(swiz xyz (var_ref r6) )) ) (declare (temporary ) int compiler_temp@80) (if (expression bool >= (swiz w (var_ref r2) )(constant float (0.000000)) ) ( (assign (x) (var_ref compiler_temp@80) (constant int (-1)) ) ) ( (assign (x) (var_ref compiler_temp@80) (constant int (0)) ) )) (assign (w) (var_ref r2) (expression float bitcast_i2f (var_ref compiler_temp@80) ) ) (assign (x) (var_ref r3) (expression float + (expression float neg (expression float abs (swiz w (var_ref r1) )) ) (constant float (1.000000)) ) ) (assign (x) (var_ref r3) (expression float sqrt (swiz x (var_ref r3) )) ) (assign (y) (var_ref r3) (expression float fma (expression float abs (swiz w (var_ref r1) )) (constant float (-0.018729)) (constant float (0.074261)) ) ) (assign (y) (var_ref r3) (expression float fma (swiz y (var_ref r3) )(expression float abs (swiz w (var_ref r1) )) (constant float (-0.212114)) ) ) (assign (w) (var_ref r1) (expression float fma (swiz y (var_ref r3) )(expression float abs (swiz w (var_ref r1) )) (constant float (1.570729)) ) ) (assign (w) (var_ref r1) (expression float * (swiz x (var_ref r3) )(swiz w (var_ref r1) )) ) (assign (w) (var_ref r1) (expression float * (swiz w (var_ref r1) )(constant float (0.636620)) ) ) (assign (w) (var_ref r1) (expression float min (swiz w (var_ref r1) )(constant float (1.000000)) ) ) (assign (w) (var_ref r1) (expression float * (swiz w (var_ref r1) )(constant float (63.000000)) ) ) (assign (w) (var_ref r1) (expression float bitcast_u2f (expression uint f2u (swiz w (var_ref r1) )) ) ) (assign (xy) (var_ref r3) (expression vec2 sat (expression vec2 fma (swiz xy (var_ref r5) )(constant vec2 (0.500000 0.500000)) (constant vec2 (0.500000 0.500000)) ) ) ) (assign (xy) (var_ref r3) (expression 
vec2 * (swiz xy (var_ref r3) )(constant vec2 (511.000000 511.000000)) ) ) (assign (xy) (var_ref r3) (expression vec2 bitcast_u2f (expression uvec2 f2u (swiz xy (var_ref r3) )) ) ) (assign (xy) (var_ref r3) (expression vec2 bitcast_i2f (expression ivec2 << (expression ivec2 bitcast_f2i (swiz xy (var_ref r3) )) (constant ivec2 (17 8)) ) ) ) (assign (w) (var_ref r1) (expression float bitcast_i2f (expression int + (expression int * (expression int bitcast_f2i (swiz w (var_ref r1) )) (constant int (67108864)) ) (expression int bitcast_f2i (swiz x (var_ref r3) )) ) ) ) (assign (w) (var_ref r1) (expression float bitcast_i2f (expression int + (expression int bitcast_f2i (swiz w (var_ref r1) )) (expression int bitcast_f2i (swiz y (var_ref r3) )) ) ) ) (assign (x) (var_ref r3) (expression float + (expression float neg (swiz x (var_ref r0) )) (constant float (1.000000)) ) ) (assign (x) (var_ref r3) (expression float min (swiz x (var_ref r3) )(constant float (1.000000)) ) ) (assign (x) (var_ref r3) (expression float * (swiz x (var_ref r3) )(constant float (127.000000)) ) ) (assign (x) (var_ref r3) (expression float bitcast_u2f (expression uint f2u (swiz x (var_ref r3) )) ) ) (assign (x) (var_ref r3) (expression float bitcast_i2f (expression int << (expression int bitcast_f2i (swiz x (var_ref r3) )) (constant int (1)) ) ) ) (assign (x) (var_ref r3) (expression float bitcast_u2f (expression uint & (expression uint bitcast_f2u (swiz x (var_ref r3) )) (constant uint (252)) ) ) ) (assign (w) (var_ref r1) (expression float bitcast_i2f (expression int + (expression int bitcast_f2i (swiz w (var_ref r1) )) (expression int bitcast_f2i (swiz x (var_ref r3) )) ) ) ) (assign (x) (var_ref r3) (expression float bitcast_i2f (expression int bitfield_insert (expression int bitcast_f2i (swiz w (var_ref r1) )) (expression int bitcast_f2i (swiz w (var_ref r2) )) (constant int (0)) (constant int (1)) ) ) ) (assign (xyz) (var_ref r2) (expression vec3 * (swiz xyz (var_ref r2) )(constant vec3 (127.000000 127.000000 63.000000)) ) ) (assign (xyz) (var_ref r2) (expression vec3 bitcast_u2f (expression uvec3 f2u (swiz xyz (var_ref r2) )) ) ) (assign (yz) (var_ref r2) (expression vec2 bitcast_i2f (expression ivec2 << (expression ivec2 bitcast_f2i (swiz yz (var_ref r2) )) (constant ivec2 (18 12)) ) ) ) (assign (w) (var_ref r1) (expression float bitcast_i2f (expression int + (expression int * (expression int bitcast_f2i (swiz x (var_ref r2) )) (constant int (33554432)) ) (expression int bitcast_f2i (swiz y (var_ref r2) )) ) ) ) (assign (w) (var_ref r1) (expression float bitcast_i2f (expression int + (expression int bitcast_f2i (swiz w (var_ref r1) )) (expression int bitcast_f2i (swiz z (var_ref r2) )) ) ) ) (assign (z) (var_ref r1) (expression float * (swiz z (var_ref r1) )(constant float (63.000000)) ) ) (assign (z) (var_ref r1) (expression float bitcast_u2f (expression uint f2u (swiz z (var_ref r1) )) ) ) (assign (z) (var_ref r1) (expression float bitcast_i2f (expression int << (expression int bitcast_f2i (swiz z (var_ref r1) )) (constant int (6)) ) ) ) (assign (z) (var_ref r1) (expression float bitcast_i2f (expression int + (expression int bitcast_f2i (swiz w (var_ref r1) )) (expression int bitcast_f2i (swiz z (var_ref r1) )) ) ) ) (declare (temporary ) vec4 compiler_temp@81) (declare (temporary ) uint compiler_temp@82) (assign (x) (var_ref compiler_temp@82) (constant uint (0)) ) (assign (xyzw) (var_ref compiler_temp@81) (expression vec4 ubo_load (constant uint (1)) (expression uint + (var_ref compiler_temp@82) (constant uint (0)) 
) ) ) (assign (y) (var_ref r3) (expression float bitcast_i2f (expression int bitfield_insert (expression int bitcast_f2i (swiz z (var_ref r1) )) (expression int bitcast_f2i (swiz x (var_ref compiler_temp@81) )) (constant int (0)) (constant int (6)) ) ) ) (assign (y) (var_ref r1) (expression float bitcast_u2f (expression uint f2u (swiz y (var_ref r1) )) ) ) (assign (w) (var_ref r0) (expression float bitcast_i2f (expression int << (expression int bitcast_f2i (swiz w (var_ref r0) )) (constant int (1)) ) ) ) (assign (y) (var_ref r0) (expression float bitcast_i2f (expression int bitfield_insert (expression int bitcast_f2i (swiz w (var_ref r0) )) (expression int bitcast_f2i (swiz y (var_ref r0) )) (constant int (1)) (constant int (1)) ) ) ) (assign (y) (var_ref r0) (expression float bitcast_i2f (expression int bitfield_insert (expression int bitcast_f2i (swiz y (var_ref r0) )) (expression int bitcast_f2i (swiz z (var_ref r0) )) (constant int (2)) (constant int (1)) ) ) ) (assign (y) (var_ref r0) (expression float bitcast_i2f (expression int + (expression int bitcast_f2i (swiz y (var_ref r1) )) (expression int bitcast_f2i (swiz y (var_ref r0) )) ) ) ) (if (expression bool ! (var_ref compiler_temp@50) ) ( (declare (temporary ) uvec4 compiler_temp@83) (declare (temporary ) uint compiler_temp@84) (assign (x) (var_ref compiler_temp@84) (expression uint + (constant uint (0)) (expression uint * (expression uint bitcast_f2u (swiz y (var_ref r0) )) (constant uint (16)) ) ) ) (assign (xy) (var_ref compiler_temp@83) (expression uvec2 i2u (expression ivec2 bitcast_f2i (swiz xy (var_ref r3) )) ) ) (call __intrinsic_store_ssbo ((constant uint (1)) (expression uint + (var_ref compiler_temp@84) (constant uint (0)) ) (var_ref compiler_temp@83) (constant uint (3)) (constant uint (2)) )) ) ()) (if (expression bool ! (var_ref compiler_temp@50) ) ( (declare (temporary ) uint compiler_temp@85) (declare (temporary ) uint compiler_temp@86) (assign (x) (var_ref compiler_temp@86) (expression uint + (constant uint (0)) (expression uint * (expression uint / (expression uint bitcast_f2u (swiz x (var_ref r1) )) (constant uint (4)) ) (constant uint (4)) ) ) ) (declare (temporary ) vec4 compiler_temp@87) (declare (temporary ) uint compiler_temp@88) (assign (x) (var_ref compiler_temp@88) (constant uint (0)) ) (assign (xyzw) (var_ref compiler_temp@87) (expression vec4 ubo_load (constant uint (1)) (expression uint + (var_ref compiler_temp@88) (constant uint (0)) ) ) ) (assign (x) (var_ref compiler_temp@85) (expression uint i2u (expression int bitcast_f2i (swiz y (var_ref compiler_temp@87) )) ) ) (call __intrinsic_store_ssbo ((constant uint (0)) (expression uint + (var_ref compiler_temp@86) (constant uint (0)) ) (var_ref compiler_temp@85) (constant uint (1)) (constant uint (2)) )) ) ()) (if (expression bool ! 
(var_ref compiler_temp@50) ) ( (declare (temporary ) uint compiler_temp@89) (declare (temporary ) uint compiler_temp@90) (assign (x) (var_ref compiler_temp@90) (expression uint + (constant uint (0)) (expression uint * (expression uint + (expression uint / (expression uint bitcast_f2u (swiz x (var_ref r1) )) (constant uint (4)) ) (constant uint (1)) ) (constant uint (4)) ) ) ) (declare (temporary ) vec4 compiler_temp@91) (declare (temporary ) uint compiler_temp@92) (assign (x) (var_ref compiler_temp@92) (constant uint (0)) ) (assign (xyzw) (var_ref compiler_temp@91) (expression vec4 ubo_load (constant uint (1)) (expression uint + (var_ref compiler_temp@92) (constant uint (0)) ) ) ) (assign (x) (var_ref compiler_temp@89) (expression uint i2u (expression int bitcast_f2i (swiz z (var_ref compiler_temp@91) )) ) ) (call __intrinsic_store_ssbo ((constant uint (0)) (expression uint + (var_ref compiler_temp@90) (constant uint (0)) ) (var_ref compiler_temp@89) (constant uint (1)) (constant uint (2)) )) ) ()) ) ()) (declare (temporary ) vec4 compiler_temp@93) (declare (temporary ) uint compiler_temp@94) (assign (x) (var_ref compiler_temp@94) (constant uint (0)) ) (assign (xyzw) (var_ref compiler_temp@93) (expression vec4 ubo_load (constant uint (2)) (expression uint + (var_ref compiler_temp@94) (constant uint (0)) ) ) ) (assign (yzw) (var_ref r0) (expression vec3 * (swiz xxx (var_ref r0) )(swiz xyz (var_ref compiler_temp@93) )) ) (declare (temporary ) vec4 compiler_temp@95) (declare (temporary ) uint compiler_temp@96) (assign (x) (var_ref compiler_temp@96) (constant uint (0)) ) (assign (xyzw) (var_ref compiler_temp@95) (expression vec4 ubo_load (constant uint (2)) (expression uint + (var_ref compiler_temp@96) (constant uint (0)) ) ) ) (assign (xyz) (var_ref o0) (expression vec3 * (swiz yzw (var_ref r0) )(swiz www (var_ref compiler_temp@95) )) ) (assign (y) (var_ref r0) (expression float + (expression float neg (swiz z (var_ref v@21) )) (constant float (-30.000000)) ) ) (assign (yz) (var_ref r0) (expression vec2 max (swiz yx (var_ref r0) )(constant vec2 (0.000000 0.000000)) ) ) (assign (y) (var_ref r0) (expression float * (swiz y (var_ref r0) )(constant float (0.016667)) ) ) (assign (y) (var_ref r0) (expression float fma (expression float neg (swiz y (var_ref r0) )) (swiz y (var_ref r0) )(constant float (1.000000)) ) ) (assign (y) (var_ref r0) (expression float max (swiz y (var_ref r0) )(constant float (0.000000)) ) ) (assign (w) (var_ref o1) (expression float * (swiz y (var_ref r0) )(swiz z (var_ref r0) )) ) (assign (w) (var_ref o0) (swiz x (var_ref r0) )) (assign (x) (var_ref o1) (expression float neg (swiz z (var_ref v@21) )) ) (assign (yz) (var_ref o1) (constant vec2 (65504.000000 65504.000000)) ) (assign (xyzw) (var_ref o2) (constant vec4 (0.000000 0.000000 0.000000 0.000000)) ) (discard (var_ref compiler_temp@50) ) )) ) )

NIR (SSA form) for fragment shader:
shader: MESA_SHADER_FRAGMENT name: GLSL1 inputs: 0 outputs: 0 uniforms: 16 shared: 0
decl_var shader_storage INTERP_MODE_NONE restrict writeonly uint[] u7 (0, 0, 0) decl_var shader_storage INTERP_MODE_NONE restrict writeonly uvec4[] u5 (0, 0, 0) decl_var shader_storage INTERP_MODE_NONE restrict readonly uint[] u4 (0, 0, 0) decl_var uniform INTERP_MODE_NONE cb_ps11 idx_uniforms11_ps (0, 0, 0) decl_var uniform INTERP_MODE_NONE cb_ps13 idx_uniforms13_ps (0, 0, 0) decl_var uniform INTERP_MODE_NONE cb_ps12 idx_uniforms12_ps (0, 0, 0) decl_var uniform INTERP_MODE_NONE cb_ps6 idx_uniforms6_ps (0, 0, 0) decl_var uniform INTERP_MODE_NONE cb_ps0
idx_uniforms0_ps (0, 0, 0) decl_var uniform INTERP_MODE_NONE sampler2D resourceSamplerPair_0_ps (8, 0, 0) decl_var uniform INTERP_MODE_NONE sampler2D resourceSamplerPair_1_ps (9, 0, 0) decl_var uniform INTERP_MODE_NONE sampler2D resourceSamplerPair_2_ps (10, 0, 0) decl_var uniform INTERP_MODE_NONE sampler2D resourceSamplerPair_3_ps (11, 0, 0) decl_var uniform INTERP_MODE_NONE vec4 gl_FbWposYTransform (0, 0, 0) decl_var shader_in INTERP_MODE_SMOOTH vec4 gl_FragCoord (VARYING_SLOT_POS, 0, 0) decl_var shader_in INTERP_MODE_SMOOTH vec4 v (VARYING_SLOT_VAR1, 32, 0) decl_var shader_in INTERP_MODE_SMOOTH vec4 v@0 (VARYING_SLOT_VAR2, 33, 0) decl_var shader_in INTERP_MODE_SMOOTH vec4 v@1 (VARYING_SLOT_VAR3, 34, 0) decl_var shader_in INTERP_MODE_SMOOTH vec4 v@2 (VARYING_SLOT_VAR4, 35, 0) decl_var shader_in INTERP_MODE_SMOOTH vec4 v@3 (VARYING_SLOT_VAR6, 37, 0) decl_var shader_out INTERP_MODE_NONE vec4 o0 (FRAG_RESULT_DATA0, 8, 0) decl_var shader_out INTERP_MODE_NONE vec4 o1 (FRAG_RESULT_DATA1, 10, 0) decl_var shader_out INTERP_MODE_NONE vec4 o2 (FRAG_RESULT_DATA2, 12, 0) decl_function main (0 params) impl main { block block_0: /* preds: */ vec2 32 ssa_0 = intrinsic load_barycentric_pixel () (1) /* interp_mode=1 */ vec1 32 ssa_1 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_2 = intrinsic load_interpolated_input (ssa_0, ssa_1) (32, 0) /* base=32 */ /* component=0 */ vec4 32 ssa_3 = intrinsic load_interpolated_input (ssa_0, ssa_1) (37, 0) /* base=37 */ /* component=0 */ vec4 32 ssa_4 = intrinsic load_interpolated_input (ssa_0, ssa_1) (0, 0) /* base=0 */ /* component=0 */ vec4 32 ssa_5 = intrinsic load_interpolated_input (ssa_0, ssa_1) (34, 0) /* base=34 */ /* component=0 */ vec4 32 ssa_6 = intrinsic load_interpolated_input (ssa_0, ssa_1) (35, 0) /* base=35 */ /* component=0 */ vec1 32 ssa_7 = load_const (0x00000003 /* 0.000000 */) vec1 32 ssa_8 = load_const (0x3f800000 /* 1.000000 */) vec1 32 ssa_9 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_10 = load_const (0x00000020 /* 0.000000 */) vec1 32 ssa_11 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_12 = load_const (0xbf7d70a4 /* -0.990000 */) vec1 32 ssa_13 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_14 = load_const (0xbf800000 /* -1.000000 */) vec1 32 ssa_15 = load_const (0x3ca3d70a /* 0.020000 */) vec1 32 ssa_16 = load_const (0xbc996e30 /* -0.018729 */) vec1 32 ssa_17 = load_const (0x3d981627 /* 0.074261 */) vec1 32 ssa_18 = load_const (0xbe593484 /* -0.212114 */) vec1 32 ssa_19 = load_const (0x3fc90da4 /* 1.570729 */) vec1 32 ssa_20 = load_const (0x3f22f984 /* 0.636620 */) vec1 32 ssa_21 = load_const (0x427c0000 /* 63.000000 */) vec1 32 ssa_22 = load_const (0x3f000000 /* 0.500000 */) vec1 32 ssa_23 = load_const (0x43ff8000 /* 511.000000 */) vec1 32 ssa_24 = load_const (0x00000011 /* 0.000000 */) vec1 32 ssa_25 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_26 = load_const (0x42fe0000 /* 127.000000 */) vec1 32 ssa_27 = load_const (0x000000fc /* 0.000000 */) vec1 32 ssa_28 = load_const (0x00000012 /* 0.000000 */) vec1 32 ssa_29 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_30 = load_const (0x00000006 /* 0.000000 */) vec1 32 ssa_31 = load_const (0xc1f00000 /* -30.000000 */) vec1 32 ssa_32 = load_const (0x3c888889 /* 0.016667 */) vec1 32 ssa_33 = load_const (0x477fe000 /* 65504.000000 */) vec4 32 ssa_34 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec1 32 ssa_35 = load_const (0x00000060 /* 0.000000 */) vec4 32 ssa_36 = intrinsic load_ubo 
(ssa_7, ssa_35) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec4 32 ssa_37 = intrinsic load_interpolated_input (ssa_0, ssa_1) (33, 0) /* base=33 */ /* component=0 */ vec1 32 ssa_38 = fmul ssa_37.x, ssa_36.x vec1 32 ssa_39 = fmul ssa_37.y, ssa_36.y vec2 32 ssa_40 = vec2 ssa_38, ssa_39 vec4 32 ssa_41 = tex ssa_40 (coord), 0 (texture), 0 (sampler), vec1 32 ssa_42 = fmin ssa_41.w, ssa_8 /* succs: block_1 */ loop { block block_1: /* preds: block_0 block_4 */ vec1 32 ssa_43 = phi block_0: ssa_8, block_4: ssa_83 vec1 32 ssa_44 = phi block_0: ssa_1, block_4: ssa_84 vec1 32 ssa_45 = i2f32 ssa_44 vec1 32 ssa_46 = load_const (0x00000880 /* 0.000000 */) vec4 32 ssa_47 = intrinsic load_ubo (ssa_9, ssa_46) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_48 = fge ssa_45, ssa_47.z /* succs: block_2 block_3 */ if ssa_48 { block block_2: /* preds: block_1 */ break /* succs: block_5 */ } else { block block_3: /* preds: block_1 */ /* succs: block_4 */ } block block_4: /* preds: block_3 */ vec1 32 ssa_49 = iadd ssa_10, ssa_44 vec1 32 ssa_50 = ishl ssa_49, ssa_9 vec4 32 ssa_51 = intrinsic load_ubo (ssa_9, ssa_50) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_52 = load_const (0x00000810 /* 0.000000 */) vec4 32 ssa_53 = intrinsic load_ubo (ssa_9, ssa_52) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_54 = fmul ssa_51.y, ssa_53.x vec1 32 ssa_55 = fmul ssa_51.y, ssa_53.y vec1 32 ssa_56 = fmul ssa_51.y, ssa_53.z vec1 32 ssa_57 = load_const (0x00000800 /* 0.000000 */) vec4 32 ssa_58 = intrinsic load_ubo (ssa_9, ssa_57) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_59 = ffma ssa_51.x, ssa_58.x, ssa_54 vec1 32 ssa_60 = ffma ssa_51.x, ssa_58.y, ssa_55 vec1 32 ssa_61 = ffma ssa_51.x, ssa_58.z, ssa_56 vec1 32 ssa_62 = load_const (0x00000820 /* 0.000000 */) vec4 32 ssa_63 = intrinsic load_ubo (ssa_9, ssa_62) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_64 = ffma ssa_51.z, ssa_63.x, ssa_59 vec1 32 ssa_65 = ffma ssa_51.z, ssa_63.y, ssa_60 vec1 32 ssa_66 = ffma ssa_51.z, ssa_63.z, ssa_61 vec1 32 ssa_67 = load_const (0x00000830 /* 0.000000 */) vec4 32 ssa_68 = intrinsic load_ubo (ssa_9, ssa_67) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_69 = fadd ssa_64, ssa_68.x vec1 32 ssa_70 = fadd ssa_65, ssa_68.y vec1 32 ssa_71 = fadd ssa_66, ssa_68.z vec1 32 ssa_72 = fadd -ssa_69, ssa_2.x vec1 32 ssa_73 = fadd -ssa_70, ssa_2.y vec1 32 ssa_74 = fadd -ssa_71, ssa_2.z vec1 32 ssa_75 = frcp abs(ssa_51.w) vec1 32 ssa_76 = fmul ssa_72, ssa_75 vec1 32 ssa_77 = fmul ssa_73, ssa_75 vec1 32 ssa_78 = fmul ssa_74, ssa_75 vec1 32 ssa_79 = fmul ssa_77, ssa_77 vec1 32 ssa_80 = ffma ssa_76, ssa_76, ssa_79 vec1 32 ssa_81 = ffma ssa_78, ssa_78, ssa_80 vec1 32 ssa_82 = fmin ssa_81, ssa_8 vec1 32 ssa_83 = fmin ssa_82, ssa_43 vec1 32 ssa_84 = iadd ssa_44, ssa_11 /* succs: block_1 */ } block block_5: /* preds: block_2 */ /* succs: block_6 */ loop { block block_6: /* preds: block_5 block_9 */ vec1 32 ssa_85 = phi block_5: ssa_1, block_9: ssa_126 vec1 32 ssa_86 = phi block_5: ssa_1, block_9: ssa_127 vec1 32 ssa_87 = i2f32 ssa_86 vec1 32 ssa_88 = fge ssa_87, ssa_47.x /* succs: block_7 block_8 */ if ssa_88 { block block_7: /* preds: block_6 */ break /* succs: block_10 */ } else { block block_8: /* preds: block_6 */ /* succs: block_9 */ } block block_9: /* preds: block_8 */ vec1 32 ssa_89 = ishl ssa_86, ssa_9 vec4 32 ssa_90 = intrinsic load_ubo (ssa_9, ssa_89) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_91 = load_const (0x00000810 /* 0.000000 */) vec4 32 ssa_92 
= intrinsic load_ubo (ssa_9, ssa_91) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_93 = fmul ssa_90.y, ssa_92.x vec1 32 ssa_94 = fmul ssa_90.y, ssa_92.y vec1 32 ssa_95 = fmul ssa_90.y, ssa_92.z vec1 32 ssa_96 = load_const (0x00000800 /* 0.000000 */) vec4 32 ssa_97 = intrinsic load_ubo (ssa_9, ssa_96) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_98 = ffma ssa_90.x, ssa_97.x, ssa_93 vec1 32 ssa_99 = ffma ssa_90.x, ssa_97.y, ssa_94 vec1 32 ssa_100 = ffma ssa_90.x, ssa_97.z, ssa_95 vec1 32 ssa_101 = load_const (0x00000820 /* 0.000000 */) vec4 32 ssa_102 = intrinsic load_ubo (ssa_9, ssa_101) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_103 = ffma ssa_90.z, ssa_102.x, ssa_98 vec1 32 ssa_104 = ffma ssa_90.z, ssa_102.y, ssa_99 vec1 32 ssa_105 = ffma ssa_90.z, ssa_102.z, ssa_100 vec1 32 ssa_106 = load_const (0x00000830 /* 0.000000 */) vec4 32 ssa_107 = intrinsic load_ubo (ssa_9, ssa_106) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_108 = fadd ssa_103, ssa_107.x vec1 32 ssa_109 = fadd ssa_104, ssa_107.y vec1 32 ssa_110 = fadd ssa_105, ssa_107.z vec1 32 ssa_111 = fadd -ssa_108, ssa_2.x vec1 32 ssa_112 = fadd -ssa_109, ssa_2.y vec1 32 ssa_113 = fadd -ssa_110, ssa_2.z vec1 32 ssa_114 = frcp ssa_90.w vec1 32 ssa_115 = fmul ssa_111, ssa_114 vec1 32 ssa_116 = fmul ssa_112, ssa_114 vec1 32 ssa_117 = fmul ssa_113, ssa_114 vec1 32 ssa_118 = fmul ssa_116, ssa_116 vec1 32 ssa_119 = ffma ssa_115, ssa_115, ssa_118 vec1 32 ssa_120 = ffma ssa_117, ssa_117, ssa_119 vec1 32 ssa_121 = fadd -ssa_120, ssa_8 vec1 32 ssa_122 = fmax ssa_121, ssa_1 vec1 32 ssa_123 = fadd ssa_122, ssa_122 vec1 32 ssa_124 = fmul ssa_122, ssa_123 vec1 32 ssa_125 = fmin ssa_124, ssa_8 vec1 32 ssa_126 = fmax ssa_85, ssa_125 vec1 32 ssa_127 = iadd ssa_86, ssa_11 /* succs: block_6 */ } block block_10: /* preds: block_7 */ vec1 32 ssa_128 = fadd ssa_43, ssa_85 vec1 32 ssa_129 = fadd.sat ssa_128, ssa_3.w vec1 32 ssa_130 = flt ssa_129, -ssa_12 vec4 32 ssa_131 = intrinsic load_uniform (ssa_1) (0, 16) /* base=0 */ /* range=16 */ /* u7 */ vec4 32 ssa_132 = load_const (0x3f000000 /* 0.500000 */, 0x3f000000 /* 0.500000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec1 32 ssa_133 = fadd ssa_4.x, ssa_132.x vec1 32 ssa_134 = fadd ssa_4.y, ssa_132.y vec1 32 ssa_135 = ffma ssa_134, ssa_131.x, ssa_131.y vec1 32 ssa_136 = f2u32 ssa_133 vec1 32 ssa_137 = f2u32 ssa_135 vec1 32 ssa_138 = load_const (0x000001a0 /* 0.000000 */) vec4 32 ssa_139 = intrinsic load_ubo (ssa_1, ssa_138) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_140 = f2u32 ssa_139.z vec1 32 ssa_141 = ushr ssa_136, ssa_11 vec1 32 ssa_142 = ushr ssa_137, ssa_11 vec1 32 ssa_143 = ushr ssa_140, ssa_11 vec1 32 ssa_144 = imul ssa_142, ssa_143 vec1 32 ssa_145 = iadd ssa_144, ssa_141 vec1 32 ssa_146 = ishl ssa_145, ssa_13 vec1 32 ssa_147 = ishl ssa_146, ssa_7 vec1 32 ssa_148 = bfi ssa_25, ssa_136, ssa_147 vec1 32 ssa_149 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_150 = bfi ssa_149, ssa_137, ssa_148 vec1 32 ssa_151 = ushr ssa_150, ssa_13 vec1 32 ssa_152 = ishl ssa_151, ssa_13 vec1 32 ssa_153 = intrinsic load_ssbo (ssa_13, ssa_152) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_154 = iadd ssa_151, ssa_11 vec1 32 ssa_155 = ishl ssa_154, ssa_13 vec1 32 ssa_156 = intrinsic load_ssbo (ssa_13, ssa_155) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_157 = feq ssa_156, ssa_4.z vec1 32 ssa_158 = bcsel ssa_157, ssa_8, ssa_14 vec1 32 ssa_159 = feq ssa_153, ssa_4.z vec1 
32 ssa_160 = bcsel ssa_159, ssa_1, ssa_158 vec1 32 ssa_161 = fge ssa_160, ssa_1 vec1 32 ssa_162 = flt ssa_15, ssa_42 vec1 32 ssa_163 = iand ssa_162, ssa_161 /* succs: block_11 block_21 */ if ssa_163 { block block_11: /* preds: block_10 */ vec1 32 ssa_164 = load_const (0x00000050 /* 0.000000 */) vec4 32 ssa_165 = intrinsic load_ubo (ssa_7, ssa_164) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_166 = fmul ssa_37.x, ssa_165.z vec1 32 ssa_167 = fmul ssa_37.y, ssa_165.w vec1 32 ssa_168 = fmul ssa_37.x, ssa_165.x vec1 32 ssa_169 = fmul ssa_37.y, ssa_165.y vec2 32 ssa_170 = vec2 ssa_166, ssa_167 vec4 32 ssa_171 = tex ssa_170 (coord), 1 (texture), 1 (sampler), vec1 32 ssa_172 = load_const (0x00000040 /* 0.000000 */) vec4 32 ssa_173 = intrinsic load_ubo (ssa_7, ssa_172) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_174 = fmul ssa_171.x, ssa_173.x vec1 32 ssa_175 = fmul ssa_171.y, ssa_173.y vec2 32 ssa_176 = vec2 ssa_168, ssa_169 vec4 32 ssa_177 = tex ssa_176 (coord), 2 (texture), 2 (sampler), vec4 32 ssa_178 = intrinsic load_ubo (ssa_7, ssa_149) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_179 = fmul ssa_175, ssa_178.x vec1 32 ssa_180 = fmul ssa_175, ssa_178.y vec1 32 ssa_181 = fmul ssa_175, ssa_178.z vec4 32 ssa_182 = intrinsic load_ubo (ssa_7, ssa_1) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_183 = ffma ssa_177.x, ssa_182.x, ssa_179 vec1 32 ssa_184 = ffma ssa_177.y, ssa_182.y, ssa_180 vec1 32 ssa_185 = ffma ssa_177.z, ssa_182.z, ssa_181 vec4 32 ssa_186 = intrinsic load_ubo (ssa_7, ssa_10) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_187 = ffma.sat ssa_174, ssa_186.x, ssa_183 vec1 32 ssa_188 = ffma.sat ssa_174, ssa_186.y, ssa_184 vec1 32 ssa_189 = ffma.sat ssa_174, ssa_186.z, ssa_185 vec1 32 ssa_190 = fmul ssa_37.x, ssa_36.z vec1 32 ssa_191 = fmul ssa_37.y, ssa_36.w vec2 32 ssa_192 = vec2 ssa_190, ssa_191 vec4 32 ssa_193 = tex ssa_192 (coord), 3 (texture), 3 (sampler), vec1 32 ssa_194 = fmul ssa_2.y, ssa_2.y vec1 32 ssa_195 = ffma ssa_2.x, ssa_2.x, ssa_194 vec1 32 ssa_196 = ffma ssa_2.z, ssa_2.z, ssa_195 vec1 32 ssa_197 = frsq ssa_196 vec1 32 ssa_198 = fmul ssa_197, ssa_2.x vec1 32 ssa_199 = fmul ssa_197, ssa_2.y vec1 32 ssa_200 = fmul ssa_197, ssa_2.z vec1 32 ssa_201 = fmul ssa_5.y, ssa_5.y vec1 32 ssa_202 = ffma ssa_5.x, ssa_5.x, ssa_201 vec1 32 ssa_203 = ffma ssa_5.z, ssa_5.z, ssa_202 vec1 32 ssa_204 = frsq ssa_203 vec1 32 ssa_205 = fmul ssa_204, ssa_5.x vec1 32 ssa_206 = fmul ssa_204, ssa_5.y vec1 32 ssa_207 = fmul ssa_204, ssa_5.z vec1 32 ssa_208 = load_const (0x00000070 /* 0.000000 */) vec4 32 ssa_209 = intrinsic load_ubo (ssa_7, ssa_208) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_210 = fmul.sat ssa_193.x, ssa_209.w vec1 32 ssa_211 = fadd abs(ssa_6.y), abs(ssa_6.x) vec1 32 ssa_212 = fadd ssa_211, abs(ssa_6.z) vec1 32 ssa_213 = frcp ssa_212 vec1 32 ssa_214 = fmul ssa_6.x, ssa_213 vec1 32 ssa_215 = fmul ssa_6.y, ssa_213 vec1 32 ssa_216 = fmov -ssa_214 vec1 32 ssa_217 = fmov -ssa_215 vec1 32 ssa_218 = fadd -abs(ssa_215), ssa_8 vec1 32 ssa_219 = fadd -abs(ssa_214), ssa_8 vec1 32 ssa_220 = fge -ssa_214, ssa_1 vec1 32 ssa_221 = bcsel ssa_220, ssa_8, ssa_14 vec1 32 ssa_222 = fge -ssa_215, ssa_1 vec1 32 ssa_223 = bcsel ssa_222, ssa_8, ssa_14 vec1 32 ssa_224 = fmul ssa_218, ssa_221 vec1 32 ssa_225 = fmul ssa_219, ssa_223 vec1 32 ssa_226 = fge ssa_1, -ssa_6.z vec1 32 ssa_227 = bcsel ssa_226, ssa_224, ssa_216 vec1 32 ssa_228 = bcsel ssa_226, ssa_225, ssa_217 vec1 32 ssa_229 = fmul ssa_6.y, ssa_199 vec1 32 
ssa_230 = ffma -ssa_6.x, ssa_198, -ssa_229 vec1 32 ssa_231 = ffma -ssa_6.z, ssa_200, ssa_230 vec1 32 ssa_232 = ffma -ssa_231, -ssa_6.x, ssa_198 vec1 32 ssa_233 = ffma -ssa_231, -ssa_6.y, ssa_199 vec1 32 ssa_234 = ffma -ssa_231, -ssa_6.z, ssa_200 vec1 32 ssa_235 = fmul ssa_233, ssa_233 vec1 32 ssa_236 = ffma ssa_232, ssa_232, ssa_235 vec1 32 ssa_237 = ffma ssa_234, ssa_234, ssa_236 vec1 32 ssa_238 = frsq ssa_237 vec1 32 ssa_239 = fmul ssa_238, ssa_232 vec1 32 ssa_240 = fmul ssa_238, ssa_233 vec1 32 ssa_241 = fmul ssa_238, ssa_234 vec1 32 ssa_242 = fmul ssa_6.z, ssa_240 vec1 32 ssa_243 = fmul ssa_6.x, ssa_241 vec1 32 ssa_244 = fmul ssa_6.y, ssa_239 vec1 32 ssa_245 = ffma -ssa_6.y, ssa_241, ssa_242 vec1 32 ssa_246 = ffma -ssa_6.z, ssa_239, ssa_243 vec1 32 ssa_247 = ffma -ssa_6.x, ssa_240, ssa_244 vec1 32 ssa_248 = fmul ssa_206, ssa_240 vec1 32 ssa_249 = ffma ssa_205, ssa_239, ssa_248 vec1 32 ssa_250 = ffma ssa_207, ssa_241, ssa_249 vec1 32 ssa_251 = fmul ssa_206, ssa_246 vec1 32 ssa_252 = ffma ssa_205, ssa_245, ssa_251 vec1 32 ssa_253 = ffma ssa_207, ssa_247, ssa_252 vec1 32 ssa_254 = fadd -abs(ssa_250), ssa_8 vec1 32 ssa_255 = fsqrt ssa_254 vec1 32 ssa_256 = ffma abs(ssa_250), ssa_16, ssa_17 vec1 32 ssa_257 = ffma ssa_256, abs(ssa_250), ssa_18 vec1 32 ssa_258 = ffma ssa_257, abs(ssa_250), ssa_19 vec1 32 ssa_259 = fmul ssa_255, ssa_20 vec1 32 ssa_260 = fmul ssa_259, ssa_258 vec1 32 ssa_261 = fmin ssa_260, ssa_8 vec1 32 ssa_262 = fmul ssa_261, ssa_21 vec1 32 ssa_263 = f2u32 ssa_262 vec1 32 ssa_264 = ffma.sat ssa_227, ssa_22, ssa_22 vec1 32 ssa_265 = ffma.sat ssa_228, ssa_22, ssa_22 vec1 32 ssa_266 = fmul ssa_264, ssa_23 vec1 32 ssa_267 = fmul ssa_265, ssa_23 vec1 32 ssa_268 = f2u32 ssa_266 vec1 32 ssa_269 = f2u32 ssa_267 vec1 32 ssa_270 = ishl ssa_268, ssa_24 vec1 32 ssa_271 = ishl ssa_269, ssa_25 vec1 32 ssa_272 = load_const (0x0000001a /* 0.000000 */) vec1 32 ssa_273 = ishl ssa_263, ssa_272 vec1 32 ssa_274 = iadd ssa_273, ssa_270 vec1 32 ssa_275 = iadd ssa_274, ssa_271 vec1 32 ssa_276 = fadd -ssa_42, ssa_8 vec1 32 ssa_277 = fmin ssa_276, ssa_8 vec1 32 ssa_278 = fmul ssa_277, ssa_26 vec1 32 ssa_279 = f2u32 ssa_278 vec1 32 ssa_280 = ishl ssa_279, ssa_11 vec1 32 ssa_281 = iand ssa_280, ssa_27 vec1 32 ssa_282 = iadd ssa_275, ssa_281 vec1 32 ssa_283 = fge ssa_253, ssa_1 vec1 32 ssa_284 = bfi ssa_11, ssa_283, ssa_282 vec1 32 ssa_285 = fmul ssa_187, ssa_26 vec1 32 ssa_286 = fmul ssa_188, ssa_26 vec1 32 ssa_287 = fmul ssa_189, ssa_21 vec1 32 ssa_288 = f2u32 ssa_285 vec1 32 ssa_289 = f2u32 ssa_286 vec1 32 ssa_290 = f2u32 ssa_287 vec1 32 ssa_291 = ishl ssa_289, ssa_28 vec1 32 ssa_292 = ishl ssa_290, ssa_29 vec1 32 ssa_293 = load_const (0x00000019 /* 0.000000 */) vec1 32 ssa_294 = ishl ssa_288, ssa_293 vec1 32 ssa_295 = iadd ssa_294, ssa_291 vec1 32 ssa_296 = iadd ssa_295, ssa_292 vec1 32 ssa_297 = fmul ssa_210, ssa_21 vec1 32 ssa_298 = f2u32 ssa_297 vec1 32 ssa_299 = ishl ssa_298, ssa_30 vec1 32 ssa_300 = iadd ssa_296, ssa_299 vec4 32 ssa_301 = intrinsic load_ubo (ssa_11, ssa_1) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_302 = load_const (0x0000003f /* 0.000000 */) vec1 32 ssa_303 = bfi ssa_302, ssa_301.x, ssa_300 vec1 32 ssa_304 = f2u32 ssa_160 vec1 32 ssa_305 = ishl ssa_146, ssa_11 vec1 32 ssa_306 = bfi ssa_13, ssa_136, ssa_305 vec1 32 ssa_307 = bfi ssa_9, ssa_137, ssa_306 vec1 32 ssa_308 = iadd ssa_304, ssa_307 vec1 32 ssa_309 = ieq ssa_130, ssa_1 /* succs: block_12 block_13 */ if ssa_309 { block block_12: /* preds: block_11 */ vec1 32 ssa_310 = ishl ssa_308, ssa_9 vec2 32 ssa_311 
= vec2 ssa_284, ssa_303 intrinsic store_ssbo (ssa_311, ssa_11, ssa_310) (3, 0, 16, 0) /* wrmask=xy */ /* access=0 */ /* align_mul=16 */ /* align_offset=0 */ /* succs: block_14 */ } else { block block_13: /* preds: block_11 */ /* succs: block_14 */ } block block_14: /* preds: block_12 block_13 */ /* succs: block_15 block_16 */ if ssa_309 { block block_15: /* preds: block_14 */ vec1 32 ssa_312 = imov ssa_301.y intrinsic store_ssbo (ssa_312, ssa_1, ssa_152) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ /* succs: block_17 */ } else { block block_16: /* preds: block_14 */ /* succs: block_17 */ } block block_17: /* preds: block_15 block_16 */ /* succs: block_18 block_19 */ if ssa_309 { block block_18: /* preds: block_17 */ vec1 32 ssa_313 = imov ssa_301.z intrinsic store_ssbo (ssa_313, ssa_1, ssa_155) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ /* succs: block_20 */ } else { block block_19: /* preds: block_17 */ /* succs: block_20 */ } block block_20: /* preds: block_18 block_19 */ /* succs: block_22 */ } else { block block_21: /* preds: block_10 */ /* succs: block_22 */ } block block_22: /* preds: block_20 block_21 */ vec4 32 ssa_314 = intrinsic load_ubo (ssa_13, ssa_1) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_315 = fmul ssa_42, ssa_314.x vec1 32 ssa_316 = fmul ssa_42, ssa_314.y vec1 32 ssa_317 = fmul ssa_42, ssa_314.z vec1 32 ssa_318 = fmul ssa_315, ssa_314.w vec1 32 ssa_319 = fmul ssa_316, ssa_314.w vec1 32 ssa_320 = fmul ssa_317, ssa_314.w vec1 32 ssa_321 = fmov -ssa_2.z vec1 32 ssa_322 = fadd -ssa_2.z, ssa_31 vec1 32 ssa_323 = fmax ssa_322, ssa_1 vec1 32 ssa_324 = fmov.sat ssa_41.w vec1 32 ssa_325 = fmul ssa_323, ssa_32 vec1 32 ssa_326 = ffma -ssa_325, ssa_325, ssa_8 vec1 32 ssa_327 = fmax ssa_326, ssa_1 vec1 32 ssa_328 = fmul ssa_327, ssa_324 intrinsic discard_if (ssa_130) () vec4 32 ssa_329 = vec4 ssa_318, ssa_319, ssa_320, ssa_42 intrinsic store_output (ssa_329, ssa_1) (8, 15, 0) /* base=8 */ /* wrmask=xyzw */ /* component=0 */ /* o0 */ vec4 32 ssa_330 = vec4 ssa_321, ssa_33, ssa_33, ssa_328 intrinsic store_output (ssa_330, ssa_1) (10, 15, 0) /* base=10 */ /* wrmask=xyzw */ /* component=0 */ /* o1 */ intrinsic store_output (ssa_34, ssa_1) (12, 15, 0) /* base=12 */ /* wrmask=xyzw */ /* component=0 */ /* o2 */ /* succs: block_23 */ block block_23: }

NIR (final form) for fragment shader:
shader: MESA_SHADER_FRAGMENT name: GLSL1 inputs: 0 outputs: 0 uniforms: 16 shared: 0
decl_var shader_storage INTERP_MODE_NONE restrict writeonly uint[] u7 (0, 0, 0) decl_var shader_storage INTERP_MODE_NONE restrict writeonly uvec4[] u5 (0, 0, 0) decl_var shader_storage INTERP_MODE_NONE restrict readonly uint[] u4 (0, 0, 0) decl_var uniform INTERP_MODE_NONE cb_ps11 idx_uniforms11_ps (0, 0, 0) decl_var uniform INTERP_MODE_NONE cb_ps13 idx_uniforms13_ps (0, 0, 0) decl_var uniform INTERP_MODE_NONE cb_ps12 idx_uniforms12_ps (0, 0, 0) decl_var uniform INTERP_MODE_NONE cb_ps6 idx_uniforms6_ps (0, 0, 0) decl_var uniform INTERP_MODE_NONE cb_ps0 idx_uniforms0_ps (0, 0, 0) decl_var uniform INTERP_MODE_NONE sampler2D resourceSamplerPair_0_ps (8, 0, 0) decl_var uniform INTERP_MODE_NONE sampler2D resourceSamplerPair_1_ps (9, 0, 0) decl_var uniform INTERP_MODE_NONE sampler2D resourceSamplerPair_2_ps (10, 0, 0) decl_var uniform INTERP_MODE_NONE sampler2D resourceSamplerPair_3_ps (11, 0, 0) decl_var uniform INTERP_MODE_NONE vec4 gl_FbWposYTransform (0, 0, 0) decl_var shader_in INTERP_MODE_SMOOTH vec4 gl_FragCoord (VARYING_SLOT_POS,
0, 0) decl_var shader_in INTERP_MODE_SMOOTH vec4 v (VARYING_SLOT_VAR1, 32, 0) decl_var shader_in INTERP_MODE_SMOOTH vec4 v@0 (VARYING_SLOT_VAR2, 33, 0) decl_var shader_in INTERP_MODE_SMOOTH vec4 v@1 (VARYING_SLOT_VAR3, 34, 0) decl_var shader_in INTERP_MODE_SMOOTH vec4 v@2 (VARYING_SLOT_VAR4, 35, 0) decl_var shader_in INTERP_MODE_SMOOTH vec4 v@3 (VARYING_SLOT_VAR6, 37, 0) decl_var shader_out INTERP_MODE_NONE vec4 o0 (FRAG_RESULT_DATA0, 8, 0) decl_var shader_out INTERP_MODE_NONE vec4 o1 (FRAG_RESULT_DATA1, 10, 0) decl_var shader_out INTERP_MODE_NONE vec4 o2 (FRAG_RESULT_DATA2, 12, 0) decl_function main (0 params) impl main { decl_reg vec1 32 r0 decl_reg vec1 32 r1 decl_reg vec1 32 r2 decl_reg vec1 32 r3 block block_0: /* preds: */ vec2 32 ssa_0 = intrinsic load_barycentric_pixel () (1) /* interp_mode=1 */ vec1 32 ssa_1 = load_const (0x00000000 /* 0.000000 */) vec4 32 ssa_2 = intrinsic load_interpolated_input (ssa_0, ssa_1) (32, 0) /* base=32 */ /* component=0 */ vec4 32 ssa_3 = intrinsic load_interpolated_input (ssa_0, ssa_1) (37, 0) /* base=37 */ /* component=0 */ vec4 32 ssa_4 = intrinsic load_interpolated_input (ssa_0, ssa_1) (0, 0) /* base=0 */ /* component=0 */ vec4 32 ssa_5 = intrinsic load_interpolated_input (ssa_0, ssa_1) (34, 0) /* base=34 */ /* component=0 */ vec4 32 ssa_6 = intrinsic load_interpolated_input (ssa_0, ssa_1) (35, 0) /* base=35 */ /* component=0 */ vec1 32 ssa_7 = load_const (0x00000003 /* 0.000000 */) vec1 32 ssa_8 = load_const (0x3f800000 /* 1.000000 */) vec1 32 ssa_9 = load_const (0x00000004 /* 0.000000 */) vec1 32 ssa_10 = load_const (0x00000020 /* 0.000000 */) vec1 32 ssa_11 = load_const (0x00000001 /* 0.000000 */) vec1 32 ssa_12 = load_const (0xbf7d70a4 /* -0.990000 */) vec1 32 ssa_13 = load_const (0x00000002 /* 0.000000 */) vec1 32 ssa_14 = load_const (0xbf800000 /* -1.000000 */) vec1 32 ssa_15 = load_const (0x3ca3d70a /* 0.020000 */) vec1 32 ssa_16 = load_const (0xbc996e30 /* -0.018729 */) vec1 32 ssa_17 = load_const (0x3d981627 /* 0.074261 */) vec1 32 ssa_18 = load_const (0xbe593484 /* -0.212114 */) vec1 32 ssa_19 = load_const (0x3fc90da4 /* 1.570729 */) vec1 32 ssa_20 = load_const (0x3f22f984 /* 0.636620 */) vec1 32 ssa_21 = load_const (0x427c0000 /* 63.000000 */) vec1 32 ssa_22 = load_const (0x3f000000 /* 0.500000 */) vec1 32 ssa_23 = load_const (0x43ff8000 /* 511.000000 */) vec1 32 ssa_24 = load_const (0x00000011 /* 0.000000 */) vec1 32 ssa_25 = load_const (0x00000008 /* 0.000000 */) vec1 32 ssa_26 = load_const (0x42fe0000 /* 127.000000 */) vec1 32 ssa_27 = load_const (0x000000fc /* 0.000000 */) vec1 32 ssa_28 = load_const (0x00000012 /* 0.000000 */) vec1 32 ssa_29 = load_const (0x0000000c /* 0.000000 */) vec1 32 ssa_30 = load_const (0x00000006 /* 0.000000 */) vec1 32 ssa_31 = load_const (0xc1f00000 /* -30.000000 */) vec1 32 ssa_32 = load_const (0x3c888889 /* 0.016667 */) vec1 32 ssa_33 = load_const (0x477fe000 /* 65504.000000 */) vec4 32 ssa_34 = load_const (0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec1 32 ssa_35 = load_const (0x00000060 /* 0.000000 */) vec4 32 ssa_36 = intrinsic load_ubo (ssa_7, ssa_35) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec4 32 ssa_37 = intrinsic load_interpolated_input (ssa_0, ssa_1) (33, 0) /* base=33 */ /* component=0 */ vec1 32 ssa_38 = fmul ssa_37.x, ssa_36.x vec1 32 ssa_39 = fmul ssa_37.y, ssa_36.y vec2 32 ssa_40 = vec2 ssa_38, ssa_39 vec4 32 ssa_41 = tex ssa_40 (coord), 0 (texture), 0 (sampler), vec1 32 ssa_42 = fmin ssa_41.w, ssa_8 r1 = imov ssa_1 r0 
= imov ssa_8 /* succs: block_1 */ loop { block block_1: /* preds: block_0 block_4 */ vec1 32 ssa_45 = i2f32 r1 vec1 32 ssa_46 = load_const (0x00000880 /* 0.000000 */) vec4 32 ssa_47 = intrinsic load_ubo (ssa_9, ssa_46) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_48 = fge ssa_45, ssa_47.z /* succs: block_2 block_3 */ if ssa_48 { block block_2: /* preds: block_1 */ break /* succs: block_5 */ } else { block block_3: /* preds: block_1 */ /* succs: block_4 */ } block block_4: /* preds: block_3 */ vec1 32 ssa_49 = iadd ssa_10, r1 vec1 32 ssa_50 = ishl ssa_49, ssa_9 vec4 32 ssa_51 = intrinsic load_ubo (ssa_9, ssa_50) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_52 = load_const (0x00000810 /* 0.000000 */) vec4 32 ssa_53 = intrinsic load_ubo (ssa_9, ssa_52) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_54 = fmul ssa_51.y, ssa_53.x vec1 32 ssa_55 = fmul ssa_51.y, ssa_53.y vec1 32 ssa_56 = fmul ssa_51.y, ssa_53.z vec1 32 ssa_57 = load_const (0x00000800 /* 0.000000 */) vec4 32 ssa_58 = intrinsic load_ubo (ssa_9, ssa_57) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_59 = ffma ssa_51.x, ssa_58.x, ssa_54 vec1 32 ssa_60 = ffma ssa_51.x, ssa_58.y, ssa_55 vec1 32 ssa_61 = ffma ssa_51.x, ssa_58.z, ssa_56 vec1 32 ssa_62 = load_const (0x00000820 /* 0.000000 */) vec4 32 ssa_63 = intrinsic load_ubo (ssa_9, ssa_62) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_64 = ffma ssa_51.z, ssa_63.x, ssa_59 vec1 32 ssa_65 = ffma ssa_51.z, ssa_63.y, ssa_60 vec1 32 ssa_66 = ffma ssa_51.z, ssa_63.z, ssa_61 vec1 32 ssa_67 = load_const (0x00000830 /* 0.000000 */) vec4 32 ssa_68 = intrinsic load_ubo (ssa_9, ssa_67) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_69 = fadd ssa_64, ssa_68.x vec1 32 ssa_70 = fadd ssa_65, ssa_68.y vec1 32 ssa_71 = fadd ssa_66, ssa_68.z vec1 32 ssa_72 = fadd -ssa_69, ssa_2.x vec1 32 ssa_73 = fadd -ssa_70, ssa_2.y vec1 32 ssa_74 = fadd -ssa_71, ssa_2.z vec1 32 ssa_75 = frcp abs(ssa_51.w) vec1 32 ssa_76 = fmul ssa_72, ssa_75 vec1 32 ssa_77 = fmul ssa_73, ssa_75 vec1 32 ssa_78 = fmul ssa_74, ssa_75 vec1 32 ssa_79 = fmul ssa_77, ssa_77 vec1 32 ssa_80 = ffma ssa_76, ssa_76, ssa_79 vec1 32 ssa_81 = ffma ssa_78, ssa_78, ssa_80 vec1 32 ssa_82 = fmin ssa_81, ssa_8 r0 = fmin ssa_82, r0 r1 = iadd r1, ssa_11 /* succs: block_1 */ } block block_5: /* preds: block_2 */ r3 = imov ssa_1 r2 = imov r3 /* succs: block_6 */ loop { block block_6: /* preds: block_5 block_9 */ vec1 32 ssa_87 = i2f32 r3 vec1 32 ssa_88 = fge ssa_87, ssa_47.x /* succs: block_7 block_8 */ if ssa_88 { block block_7: /* preds: block_6 */ break /* succs: block_10 */ } else { block block_8: /* preds: block_6 */ /* succs: block_9 */ } block block_9: /* preds: block_8 */ vec1 32 ssa_89 = ishl r3, ssa_9 vec4 32 ssa_90 = intrinsic load_ubo (ssa_9, ssa_89) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_91 = load_const (0x00000810 /* 0.000000 */) vec4 32 ssa_92 = intrinsic load_ubo (ssa_9, ssa_91) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_93 = fmul ssa_90.y, ssa_92.x vec1 32 ssa_94 = fmul ssa_90.y, ssa_92.y vec1 32 ssa_95 = fmul ssa_90.y, ssa_92.z vec1 32 ssa_96 = load_const (0x00000800 /* 0.000000 */) vec4 32 ssa_97 = intrinsic load_ubo (ssa_9, ssa_96) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_98 = ffma ssa_90.x, ssa_97.x, ssa_93 vec1 32 ssa_99 = ffma ssa_90.x, ssa_97.y, ssa_94 vec1 32 ssa_100 = ffma ssa_90.x, ssa_97.z, ssa_95 vec1 32 ssa_101 = load_const (0x00000820 /* 0.000000 */) vec4 32 ssa_102 = intrinsic load_ubo 
(ssa_9, ssa_101) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_103 = ffma ssa_90.z, ssa_102.x, ssa_98 vec1 32 ssa_104 = ffma ssa_90.z, ssa_102.y, ssa_99 vec1 32 ssa_105 = ffma ssa_90.z, ssa_102.z, ssa_100 vec1 32 ssa_106 = load_const (0x00000830 /* 0.000000 */) vec4 32 ssa_107 = intrinsic load_ubo (ssa_9, ssa_106) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_108 = fadd ssa_103, ssa_107.x vec1 32 ssa_109 = fadd ssa_104, ssa_107.y vec1 32 ssa_110 = fadd ssa_105, ssa_107.z vec1 32 ssa_111 = fadd -ssa_108, ssa_2.x vec1 32 ssa_112 = fadd -ssa_109, ssa_2.y vec1 32 ssa_113 = fadd -ssa_110, ssa_2.z vec1 32 ssa_114 = frcp ssa_90.w vec1 32 ssa_115 = fmul ssa_111, ssa_114 vec1 32 ssa_116 = fmul ssa_112, ssa_114 vec1 32 ssa_117 = fmul ssa_113, ssa_114 vec1 32 ssa_118 = fmul ssa_116, ssa_116 vec1 32 ssa_119 = ffma ssa_115, ssa_115, ssa_118 vec1 32 ssa_120 = ffma ssa_117, ssa_117, ssa_119 vec1 32 ssa_121 = fadd -ssa_120, ssa_8 vec1 32 ssa_122 = fmax ssa_121, ssa_1 vec1 32 ssa_123 = fadd ssa_122, ssa_122 vec1 32 ssa_124 = fmul ssa_122, ssa_123 vec1 32 ssa_125 = fmin ssa_124, ssa_8 r2 = fmax r2, ssa_125 r3 = iadd r3, ssa_11 /* succs: block_6 */ } block block_10: /* preds: block_7 */ vec1 32 ssa_128 = fadd r0, r2 vec1 32 ssa_129 = fadd.sat ssa_128, ssa_3.w vec1 32 ssa_130 = flt ssa_129, -ssa_12 vec4 32 ssa_131 = intrinsic load_uniform (ssa_1) (0, 16) /* base=0 */ /* range=16 */ /* u7 */ vec4 32 ssa_132 = load_const (0x3f000000 /* 0.500000 */, 0x3f000000 /* 0.500000 */, 0x00000000 /* 0.000000 */, 0x00000000 /* 0.000000 */) vec1 32 ssa_133 = fadd ssa_4.x, ssa_132.x vec1 32 ssa_134 = fadd ssa_4.y, ssa_132.y vec1 32 ssa_135 = ffma ssa_134, ssa_131.x, ssa_131.y vec1 32 ssa_136 = f2u32 ssa_133 vec1 32 ssa_137 = f2u32 ssa_135 vec1 32 ssa_138 = load_const (0x000001a0 /* 0.000000 */) vec4 32 ssa_139 = intrinsic load_ubo (ssa_1, ssa_138) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_140 = f2u32 ssa_139.z vec1 32 ssa_141 = ushr ssa_136, ssa_11 vec1 32 ssa_142 = ushr ssa_137, ssa_11 vec1 32 ssa_143 = ushr ssa_140, ssa_11 vec1 32 ssa_144 = imul ssa_142, ssa_143 vec1 32 ssa_145 = iadd ssa_144, ssa_141 vec1 32 ssa_146 = ishl ssa_145, ssa_13 vec1 32 ssa_147 = ishl ssa_146, ssa_7 vec1 32 ssa_148 = bfi ssa_25, ssa_136, ssa_147 vec1 32 ssa_149 = load_const (0x00000010 /* 0.000000 */) vec1 32 ssa_150 = bfi ssa_149, ssa_137, ssa_148 vec1 32 ssa_151 = ushr ssa_150, ssa_13 vec1 32 ssa_152 = ishl ssa_151, ssa_13 vec1 32 ssa_153 = intrinsic load_ssbo (ssa_13, ssa_152) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_154 = iadd ssa_151, ssa_11 vec1 32 ssa_155 = ishl ssa_154, ssa_13 vec1 32 ssa_156 = intrinsic load_ssbo (ssa_13, ssa_155) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_157 = feq ssa_156, ssa_4.z vec1 32 ssa_158 = bcsel ssa_157, ssa_8, ssa_14 vec1 32 ssa_159 = feq ssa_153, ssa_4.z vec1 32 ssa_160 = bcsel ssa_159, ssa_1, ssa_158 vec1 32 ssa_161 = fge ssa_160, ssa_1 vec1 32 ssa_162 = flt ssa_15, ssa_42 vec1 32 ssa_163 = iand ssa_162, ssa_161 /* succs: block_11 block_21 */ if ssa_163 { block block_11: /* preds: block_10 */ vec1 32 ssa_164 = load_const (0x00000050 /* 0.000000 */) vec4 32 ssa_165 = intrinsic load_ubo (ssa_7, ssa_164) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_166 = fmul ssa_37.x, ssa_165.z vec1 32 ssa_167 = fmul ssa_37.y, ssa_165.w vec1 32 ssa_168 = fmul ssa_37.x, ssa_165.x vec1 32 ssa_169 = fmul ssa_37.y, ssa_165.y vec2 32 ssa_170 = vec2 ssa_166, ssa_167 vec4 32 ssa_171 = tex ssa_170 (coord), 
1 (texture), 1 (sampler), vec1 32 ssa_172 = load_const (0x00000040 /* 0.000000 */) vec4 32 ssa_173 = intrinsic load_ubo (ssa_7, ssa_172) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_174 = fmul ssa_171.x, ssa_173.x vec1 32 ssa_175 = fmul ssa_171.y, ssa_173.y vec2 32 ssa_176 = vec2 ssa_168, ssa_169 vec4 32 ssa_177 = tex ssa_176 (coord), 2 (texture), 2 (sampler), vec4 32 ssa_178 = intrinsic load_ubo (ssa_7, ssa_149) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_179 = fmul ssa_175, ssa_178.x vec1 32 ssa_180 = fmul ssa_175, ssa_178.y vec1 32 ssa_181 = fmul ssa_175, ssa_178.z vec4 32 ssa_182 = intrinsic load_ubo (ssa_7, ssa_1) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_183 = ffma ssa_177.x, ssa_182.x, ssa_179 vec1 32 ssa_184 = ffma ssa_177.y, ssa_182.y, ssa_180 vec1 32 ssa_185 = ffma ssa_177.z, ssa_182.z, ssa_181 vec4 32 ssa_186 = intrinsic load_ubo (ssa_7, ssa_10) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_187 = ffma.sat ssa_174, ssa_186.x, ssa_183 vec1 32 ssa_188 = ffma.sat ssa_174, ssa_186.y, ssa_184 vec1 32 ssa_189 = ffma.sat ssa_174, ssa_186.z, ssa_185 vec1 32 ssa_190 = fmul ssa_37.x, ssa_36.z vec1 32 ssa_191 = fmul ssa_37.y, ssa_36.w vec2 32 ssa_192 = vec2 ssa_190, ssa_191 vec4 32 ssa_193 = tex ssa_192 (coord), 3 (texture), 3 (sampler), vec1 32 ssa_194 = fmul ssa_2.y, ssa_2.y vec1 32 ssa_195 = ffma ssa_2.x, ssa_2.x, ssa_194 vec1 32 ssa_196 = ffma ssa_2.z, ssa_2.z, ssa_195 vec1 32 ssa_197 = frsq ssa_196 vec1 32 ssa_198 = fmul ssa_197, ssa_2.x vec1 32 ssa_199 = fmul ssa_197, ssa_2.y vec1 32 ssa_200 = fmul ssa_197, ssa_2.z vec1 32 ssa_201 = fmul ssa_5.y, ssa_5.y vec1 32 ssa_202 = ffma ssa_5.x, ssa_5.x, ssa_201 vec1 32 ssa_203 = ffma ssa_5.z, ssa_5.z, ssa_202 vec1 32 ssa_204 = frsq ssa_203 vec1 32 ssa_205 = fmul ssa_204, ssa_5.x vec1 32 ssa_206 = fmul ssa_204, ssa_5.y vec1 32 ssa_207 = fmul ssa_204, ssa_5.z vec1 32 ssa_208 = load_const (0x00000070 /* 0.000000 */) vec4 32 ssa_209 = intrinsic load_ubo (ssa_7, ssa_208) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_210 = fmul.sat ssa_193.x, ssa_209.w vec1 32 ssa_211 = fadd abs(ssa_6.y), abs(ssa_6.x) vec1 32 ssa_212 = fadd ssa_211, abs(ssa_6.z) vec1 32 ssa_213 = frcp ssa_212 vec1 32 ssa_214 = fmul ssa_6.x, ssa_213 vec1 32 ssa_215 = fmul ssa_6.y, ssa_213 vec1 32 ssa_216 = fmov -ssa_214 vec1 32 ssa_217 = fmov -ssa_215 vec1 32 ssa_218 = fadd -abs(ssa_215), ssa_8 vec1 32 ssa_219 = fadd -abs(ssa_214), ssa_8 vec1 32 ssa_220 = fge -ssa_214, ssa_1 vec1 32 ssa_221 = bcsel ssa_220, ssa_8, ssa_14 vec1 32 ssa_222 = fge -ssa_215, ssa_1 vec1 32 ssa_223 = bcsel ssa_222, ssa_8, ssa_14 vec1 32 ssa_224 = fmul ssa_218, ssa_221 vec1 32 ssa_225 = fmul ssa_219, ssa_223 vec1 32 ssa_226 = fge ssa_1, -ssa_6.z vec1 32 ssa_227 = bcsel ssa_226, ssa_224, ssa_216 vec1 32 ssa_228 = bcsel ssa_226, ssa_225, ssa_217 vec1 32 ssa_229 = fmul ssa_6.y, ssa_199 vec1 32 ssa_230 = ffma -ssa_6.x, ssa_198, -ssa_229 vec1 32 ssa_231 = ffma -ssa_6.z, ssa_200, ssa_230 vec1 32 ssa_232 = ffma -ssa_231, -ssa_6.x, ssa_198 vec1 32 ssa_233 = ffma -ssa_231, -ssa_6.y, ssa_199 vec1 32 ssa_234 = ffma -ssa_231, -ssa_6.z, ssa_200 vec1 32 ssa_235 = fmul ssa_233, ssa_233 vec1 32 ssa_236 = ffma ssa_232, ssa_232, ssa_235 vec1 32 ssa_237 = ffma ssa_234, ssa_234, ssa_236 vec1 32 ssa_238 = frsq ssa_237 vec1 32 ssa_239 = fmul ssa_238, ssa_232 vec1 32 ssa_240 = fmul ssa_238, ssa_233 vec1 32 ssa_241 = fmul ssa_238, ssa_234 vec1 32 ssa_242 = fmul ssa_6.z, ssa_240 vec1 32 ssa_243 = fmul ssa_6.x, ssa_241 vec1 32 ssa_244 = fmul ssa_6.y, ssa_239 
vec1 32 ssa_245 = ffma -ssa_6.y, ssa_241, ssa_242 vec1 32 ssa_246 = ffma -ssa_6.z, ssa_239, ssa_243 vec1 32 ssa_247 = ffma -ssa_6.x, ssa_240, ssa_244 vec1 32 ssa_248 = fmul ssa_206, ssa_240 vec1 32 ssa_249 = ffma ssa_205, ssa_239, ssa_248 vec1 32 ssa_250 = ffma ssa_207, ssa_241, ssa_249 vec1 32 ssa_251 = fmul ssa_206, ssa_246 vec1 32 ssa_252 = ffma ssa_205, ssa_245, ssa_251 vec1 32 ssa_253 = ffma ssa_207, ssa_247, ssa_252 vec1 32 ssa_254 = fadd -abs(ssa_250), ssa_8 vec1 32 ssa_255 = fsqrt ssa_254 vec1 32 ssa_256 = ffma abs(ssa_250), ssa_16, ssa_17 vec1 32 ssa_257 = ffma ssa_256, abs(ssa_250), ssa_18 vec1 32 ssa_258 = ffma ssa_257, abs(ssa_250), ssa_19 vec1 32 ssa_259 = fmul ssa_255, ssa_20 vec1 32 ssa_260 = fmul ssa_259, ssa_258 vec1 32 ssa_261 = fmin ssa_260, ssa_8 vec1 32 ssa_262 = fmul ssa_261, ssa_21 vec1 32 ssa_263 = f2u32 ssa_262 vec1 32 ssa_264 = ffma.sat ssa_227, ssa_22, ssa_22 vec1 32 ssa_265 = ffma.sat ssa_228, ssa_22, ssa_22 vec1 32 ssa_266 = fmul ssa_264, ssa_23 vec1 32 ssa_267 = fmul ssa_265, ssa_23 vec1 32 ssa_268 = f2u32 ssa_266 vec1 32 ssa_269 = f2u32 ssa_267 vec1 32 ssa_270 = ishl ssa_268, ssa_24 vec1 32 ssa_271 = ishl ssa_269, ssa_25 vec1 32 ssa_272 = load_const (0x0000001a /* 0.000000 */) vec1 32 ssa_273 = ishl ssa_263, ssa_272 vec1 32 ssa_274 = iadd ssa_273, ssa_270 vec1 32 ssa_275 = iadd ssa_274, ssa_271 vec1 32 ssa_276 = fadd -ssa_42, ssa_8 vec1 32 ssa_277 = fmin ssa_276, ssa_8 vec1 32 ssa_278 = fmul ssa_277, ssa_26 vec1 32 ssa_279 = f2u32 ssa_278 vec1 32 ssa_280 = ishl ssa_279, ssa_11 vec1 32 ssa_281 = iand ssa_280, ssa_27 vec1 32 ssa_282 = iadd ssa_275, ssa_281 vec1 32 ssa_283 = fge ssa_253, ssa_1 vec1 32 ssa_284 = bfi ssa_11, ssa_283, ssa_282 vec1 32 ssa_285 = fmul ssa_187, ssa_26 vec1 32 ssa_286 = fmul ssa_188, ssa_26 vec1 32 ssa_287 = fmul ssa_189, ssa_21 vec1 32 ssa_288 = f2u32 ssa_285 vec1 32 ssa_289 = f2u32 ssa_286 vec1 32 ssa_290 = f2u32 ssa_287 vec1 32 ssa_291 = ishl ssa_289, ssa_28 vec1 32 ssa_292 = ishl ssa_290, ssa_29 vec1 32 ssa_293 = load_const (0x00000019 /* 0.000000 */) vec1 32 ssa_294 = ishl ssa_288, ssa_293 vec1 32 ssa_295 = iadd ssa_294, ssa_291 vec1 32 ssa_296 = iadd ssa_295, ssa_292 vec1 32 ssa_297 = fmul ssa_210, ssa_21 vec1 32 ssa_298 = f2u32 ssa_297 vec1 32 ssa_299 = ishl ssa_298, ssa_30 vec1 32 ssa_300 = iadd ssa_296, ssa_299 vec4 32 ssa_301 = intrinsic load_ubo (ssa_11, ssa_1) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_302 = load_const (0x0000003f /* 0.000000 */) vec1 32 ssa_303 = bfi ssa_302, ssa_301.x, ssa_300 vec1 32 ssa_304 = f2u32 ssa_160 vec1 32 ssa_305 = ishl ssa_146, ssa_11 vec1 32 ssa_306 = bfi ssa_13, ssa_136, ssa_305 vec1 32 ssa_307 = bfi ssa_9, ssa_137, ssa_306 vec1 32 ssa_308 = iadd ssa_304, ssa_307 vec1 32 ssa_309 = ieq ssa_130, ssa_1 /* succs: block_12 block_13 */ if ssa_309 { block block_12: /* preds: block_11 */ vec1 32 ssa_310 = ishl ssa_308, ssa_9 vec2 32 ssa_311 = vec2 ssa_284, ssa_303 intrinsic store_ssbo (ssa_311, ssa_11, ssa_310) (3, 0, 16, 0) /* wrmask=xy */ /* access=0 */ /* align_mul=16 */ /* align_offset=0 */ /* succs: block_14 */ } else { block block_13: /* preds: block_11 */ /* succs: block_14 */ } block block_14: /* preds: block_12 block_13 */ /* succs: block_15 block_16 */ if ssa_309 { block block_15: /* preds: block_14 */ vec1 32 ssa_312 = imov ssa_301.y intrinsic store_ssbo (ssa_312, ssa_1, ssa_152) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ /* succs: block_17 */ } else { block block_16: /* preds: block_14 */ /* succs: block_17 */ } block block_17: /* 
preds: block_15 block_16 */ /* succs: block_18 block_19 */ if ssa_309 { block block_18: /* preds: block_17 */ vec1 32 ssa_313 = imov ssa_301.z intrinsic store_ssbo (ssa_313, ssa_1, ssa_155) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ /* succs: block_20 */ } else { block block_19: /* preds: block_17 */ /* succs: block_20 */ } block block_20: /* preds: block_18 block_19 */ /* succs: block_22 */ } else { block block_21: /* preds: block_10 */ /* succs: block_22 */ } block block_22: /* preds: block_20 block_21 */ vec4 32 ssa_314 = intrinsic load_ubo (ssa_13, ssa_1) (16, 0) /* align_mul=16 */ /* align_offset=0 */ vec1 32 ssa_315 = fmul ssa_42, ssa_314.x vec1 32 ssa_316 = fmul ssa_42, ssa_314.y vec1 32 ssa_317 = fmul ssa_42, ssa_314.z vec1 32 ssa_318 = fmul ssa_315, ssa_314.w vec1 32 ssa_319 = fmul ssa_316, ssa_314.w vec1 32 ssa_320 = fmul ssa_317, ssa_314.w vec1 32 ssa_321 = fmov -ssa_2.z vec1 32 ssa_322 = fadd -ssa_2.z, ssa_31 vec1 32 ssa_323 = fmax ssa_322, ssa_1 vec1 32 ssa_324 = fmov.sat ssa_41.w vec1 32 ssa_325 = fmul ssa_323, ssa_32 vec1 32 ssa_326 = ffma -ssa_325, ssa_325, ssa_8 vec1 32 ssa_327 = fmax ssa_326, ssa_1 vec1 32 ssa_328 = fmul ssa_327, ssa_324 intrinsic discard_if (ssa_130) () vec4 32 ssa_329 = vec4 ssa_318, ssa_319, ssa_320, ssa_42 intrinsic store_output (ssa_329, ssa_1) (8, 15, 0) /* base=8 */ /* wrmask=xyzw */ /* component=0 */ /* o0 */ vec4 32 ssa_330 = vec4 ssa_321, ssa_33, ssa_33, ssa_328 intrinsic store_output (ssa_330, ssa_1) (10, 15, 0) /* base=10 */ /* wrmask=xyzw */ /* component=0 */ /* o1 */ intrinsic store_output (ssa_34, ssa_1) (12, 15, 0) /* base=12 */ /* wrmask=xyzw */ /* component=0 */ /* o2 */ /* succs: block_23 */ block block_23: }

Native code for unnamed fragment shader GLSL1
SIMD8 shader: 326 instructions. 2 loops. 14465 cycles. 0:0 spills:fills. Promoted 6 constants.
Compacted 5216 to 3696 bytes (29%) START B0 (294 cycles) add(16) g41<1>UW g1.4<1,4,0>UW 0x11001010V { align1 WE_all 1H }; mov(8) g39<1>F g41<8,4,1>UW { align1 1Q }; mov(8) g40<1>F g41.4<8,4,1>UW { align1 1Q }; mov(1) f0.1<1>UW g1.14<0,1,0>UW { align1 WE_all 1N }; pln(8) g77<1>F g13<0,1,0>F g2<8,8,1>F { align1 1Q compacted }; pln(8) g79<1>F g13.4<0,1,0>F g2<8,8,1>F { align1 1Q compacted }; pln(8) g116<1>F g14<0,1,0>F g2<8,8,1>F { align1 1Q compacted }; pln(8) g46<1>F g22.4<0,1,0>F g2<8,8,1>F { align1 1Q compacted }; mov(8) g25<1>F g4<8,8,1>F { align1 1Q compacted }; pln(8) g26<1>F g17<0,1,0>F g2<8,8,1>F { align1 1Q compacted }; pln(8) g27<1>F g17.4<0,1,0>F g2<8,8,1>F { align1 1Q compacted }; pln(8) g28<1>F g18<0,1,0>F g2<8,8,1>F { align1 1Q compacted }; pln(8) g29<1>F g19<0,1,0>F g2<8,8,1>F { align1 1Q compacted }; pln(8) g30<1>F g19.4<0,1,0>F g2<8,8,1>F { align1 1Q compacted }; pln(8) g31<1>F g20<0,1,0>F g2<8,8,1>F { align1 1Q compacted }; mov(8) g47<1>UD 0x00000004UD { align1 1Q compacted }; mov(8) g48<1>UD 0x00000001UD { align1 1Q compacted }; mov(8) g49<1>UD 0x00000002UD { align1 1Q compacted }; mov(8) g50<1>D -1082130432D { align1 1Q }; mov(8) g51<1>UD 0x00000008UD { align1 1Q compacted }; pln(8) g32<1>F g15<0,1,0>F g2<8,8,1>F { align1 1Q compacted }; pln(8) g33<1>F g15.4<0,1,0>F g2<8,8,1>F { align1 1Q compacted }; mov(8) g43<1>UD 0x00000000UD { align1 1Q compacted }; mov(8) g42<1>D 1065353216D { align1 1Q }; mul(8) g61<1>F g32<8,8,1>F g10<0,1,0>F { align1 1Q compacted }; mul(8) g62<1>F g33<8,8,1>F g10.1<0,1,0>F { align1 1Q compacted }; send(8) g2<1>UW g61<0,1,0>F 0x04420003 sampler MsgDesc: sample SIMD8 Surface = 3 Sampler = 0 mlen 2 rlen 4 { align1 1Q }; sel.l(8) g18<1>F g5<8,8,1>F 0x3f800000F /* 1F */ { align1 1Q }; END B0 ->B1 START B2 <-B1 <-B3 (256 cycles) mov(8) g52<1>F g43<8,8,1>D { align1 1Q compacted }; END B1 ->B2 ->B4 mov(8) g22<1>UD g0<8,8,1>UD { align1 WE_all 1Q compacted }; mov(1) g22.2<1>UD 0x00000088UD { align1 WE_all 1N compacted }; send(16) g19<1>UD g22<0,1,0>UD 0x0228030b const MsgDesc: (11, 3, 0, 0) mlen 1 rlen 2 { align1 WE_all 1H }; cmp.ge.f0(8) null<1>F g52<8,8,1>F g19.2<0,1,0>F { align1 1Q compacted }; (+f0) break(8) JIP: 344 UIP: 344 { align1 1Q }; END B2 ->B1 ->B3 START B3 <-B2 (422 cycles) add(8) g22<1>D g43<8,8,1>D 32D { align1 1Q compacted }; mov(8) g38<1>UD g0<8,8,1>UD { align1 WE_all 1Q compacted }; add(8) g43<1>D g43<8,8,1>D 1D { align1 1Q compacted }; shl(8) g22<1>D g22<8,8,1>D 0x00000004UD { align1 1Q }; mov(1) g38.2<1>UD 0x00000080UD { align1 WE_all 1N compacted }; send(8) g34<1>UW g22<0,1,0>UD 0x0242700b sampler MsgDesc: ld SIMD8 Surface = 11 Sampler = 0 mlen 1 rlen 4 { align1 1Q }; send(16) g59<1>UD g38<0,1,0>UD 0x0228030b const MsgDesc: (11, 3, 0, 0) mlen 1 rlen 2 { align1 WE_all 1H }; math inv(8) g63<1>F (abs)g37<8,8,1>F null<8,8,1>F { align1 1Q compacted }; mul(8) g38<1>F g35<8,8,1>F g59.4<0,1,0>F { align1 1Q compacted }; mul(8) g41<1>F g35<8,8,1>F g59.5<0,1,0>F { align1 1Q compacted }; mul(8) g44<1>F g35<8,8,1>F g59.6<0,1,0>F { align1 1Q compacted }; mad(8) g45<1>F g38<4,4,1>F g59.0<0,1,0>F g34<4,4,1>F { align16 1Q }; mad(8) g52<1>F g41<4,4,1>F g59.1<0,1,0>F g34<4,4,1>F { align16 1Q }; mad(8) g53<1>F g44<4,4,1>F g59.2<0,1,0>F g34<4,4,1>F { align16 1Q }; mad(8) g54<1>F g45<4,4,1>F g60.0<0,1,0>F g36<4,4,1>F { align16 1Q }; mad(8) g55<1>F g52<4,4,1>F g60.1<0,1,0>F g36<4,4,1>F { align16 1Q }; mad(8) g56<1>F g53<4,4,1>F g60.2<0,1,0>F g36<4,4,1>F { align16 1Q }; add(8) g57<1>F g54<8,8,1>F g60.4<0,1,0>F { align1 1Q compacted }; add(8) g58<1>F 
g55<8,8,1>F g60.5<0,1,0>F { align1 1Q compacted }; add(8) g59<1>F g56<8,8,1>F g60.6<0,1,0>F { align1 1Q compacted }; add(8) g60<1>F -g57<8,8,1>F g77<8,8,1>F { align1 1Q compacted }; add(8) g61<1>F -g58<8,8,1>F g79<8,8,1>F { align1 1Q compacted }; add(8) g62<1>F -g59<8,8,1>F g116<8,8,1>F { align1 1Q compacted }; mul(8) g64<1>F g60<8,8,1>F g63<8,8,1>F { align1 1Q compacted }; mul(8) g65<1>F g61<8,8,1>F g63<8,8,1>F { align1 1Q compacted }; mul(8) g66<1>F g62<8,8,1>F g63<8,8,1>F { align1 1Q compacted }; mul(8) g67<1>F g65<8,8,1>F g65<8,8,1>F { align1 1Q compacted }; mad(8) g68<1>F g67<4,4,1>F g64<4,4,1>F g64<4,4,1>F { align16 1Q compacted }; mad(8) g69<1>F g68<4,4,1>F g66<4,4,1>F g66<4,4,1>F { align16 1Q compacted }; sel.l(8) g70<1>F g69<8,8,1>F 0x3f800000F /* 1F */ { align1 1Q }; sel.l(8) g42<1>F g70<8,8,1>F g42<8,8,1>F { align1 1Q compacted }; while(8) JIP: -392 { align1 1Q }; END B3 ->B2 START B4 <-B1 (4 cycles) mov(8) g45<1>UD 0x00000000UD { align1 1Q compacted }; mov(8) g44<1>UD 0x00000000UD { align1 1Q compacted }; END B4 ->B5 START B6 <-B5 <-B7 (34 cycles) mov(8) g71<1>F g45<8,8,1>D { align1 1Q compacted }; END B5 ->B6 ->B8 cmp.ge.f0(8) null<1>F g71<8,8,1>F g19<0,1,0>F { align1 1Q compacted }; (+f0) break(8) JIP: 376 UIP: 376 { align1 1Q }; END B6 ->B5 ->B7 START B7 <-B6 (486 cycles) shl(8) g34<1>D g45<8,8,1>D 0x00000004UD { align1 1Q }; mov(8) g41<1>UD g0<8,8,1>UD { align1 WE_all 1Q compacted }; add(8) g45<1>D g45<8,8,1>D 1D { align1 1Q compacted }; send(8) g35<1>UW g34<0,1,0>UD 0x0242700b sampler MsgDesc: ld SIMD8 Surface = 11 Sampler = 0 mlen 1 rlen 4 { align1 1Q }; mov(1) g41.2<1>UD 0x00000080UD { align1 WE_all 1N compacted }; send(16) g60<1>UD g41<0,1,0>UD 0x0228030b const MsgDesc: (11, 3, 0, 0) mlen 1 rlen 2 { align1 WE_all 1H }; math inv(8) g87<1>F g38<8,8,1>F null<8,8,1>F { align1 1Q compacted }; mul(8) g72<1>F g36<8,8,1>F g60.4<0,1,0>F { align1 1Q compacted }; mul(8) g73<1>F g36<8,8,1>F g60.5<0,1,0>F { align1 1Q compacted }; mul(8) g74<1>F g36<8,8,1>F g60.6<0,1,0>F { align1 1Q compacted }; mad(8) g75<1>F g72<4,4,1>F g60.0<0,1,0>F g35<4,4,1>F { align16 1Q }; mad(8) g76<1>F g73<4,4,1>F g60.1<0,1,0>F g35<4,4,1>F { align16 1Q }; mad(8) g21<1>F g74<4,4,1>F g60.2<0,1,0>F g35<4,4,1>F { align16 1Q }; mad(8) g78<1>F g75<4,4,1>F g61.0<0,1,0>F g37<4,4,1>F { align16 1Q }; mad(8) g23<1>F g76<4,4,1>F g61.1<0,1,0>F g37<4,4,1>F { align16 1Q }; mad(8) g80<1>F g21<4,4,1>F g61.2<0,1,0>F g37<4,4,1>F { align16 1Q }; add(8) g81<1>F g78<8,8,1>F g61.4<0,1,0>F { align1 1Q compacted }; add(8) g82<1>F g23<8,8,1>F g61.5<0,1,0>F { align1 1Q compacted }; add(8) g83<1>F g80<8,8,1>F g61.6<0,1,0>F { align1 1Q compacted }; add(8) g84<1>F -g81<8,8,1>F g77<8,8,1>F { align1 1Q compacted }; add(8) g85<1>F -g82<8,8,1>F g79<8,8,1>F { align1 1Q compacted }; add(8) g86<1>F -g83<8,8,1>F g116<8,8,1>F { align1 1Q compacted }; mul(8) g88<1>F g84<8,8,1>F g87<8,8,1>F { align1 1Q compacted }; mul(8) g89<1>F g85<8,8,1>F g87<8,8,1>F { align1 1Q compacted }; mul(8) g90<1>F g86<8,8,1>F g87<8,8,1>F { align1 1Q compacted }; mul(8) g91<1>F g89<8,8,1>F g89<8,8,1>F { align1 1Q compacted }; mad(8) g92<1>F g91<4,4,1>F g88<4,4,1>F g88<4,4,1>F { align16 1Q compacted }; mad(8) g93<1>F g92<4,4,1>F g90<4,4,1>F g90<4,4,1>F { align16 1Q compacted }; add(8) g94<1>F -g93<8,8,1>F 0x3f800000F /* 1F */ { align1 1Q }; sel.ge(8) g95<1>F g94<8,8,1>F 0x0F /* 0F */ { align1 1Q compacted }; add(8) g96<1>F g95<8,8,1>F g95<8,8,1>F { align1 1Q compacted }; mul(8) g97<1>F g95<8,8,1>F g96<8,8,1>F { align1 1Q compacted }; sel.l(8) g98<1>F g97<8,8,1>F 
0x3f800000F /* 1F */ { align1 1Q }; sel.ge(8) g44<1>F g44<8,8,1>F g98<8,8,1>F { align1 1Q compacted }; while(8) JIP: -392 { align1 1Q }; END B7 ->B6 START B8 <-B5 (1124 cycles) add(8) g99<1>F g42<8,8,1>F g44<8,8,1>F { align1 1Q compacted }; add(8) g102<1>F g39<8,8,1>F 0x3f000000F /* 0.5F */ { align1 1Q }; add(8) g103<1>F g40<8,8,1>F 0x3f000000F /* 0.5F */ { align1 1Q }; mov(8) g41<1>UD 0x00000010UD { align1 1Q compacted }; mov(8) g42<1>UD g0<8,8,1>UD { align1 WE_all 1Q compacted }; add.sat(8) g100<1>F g99<8,8,1>F g46<8,8,1>F { align1 1Q compacted }; mov(8) g105<1>UD g102<8,8,1>F { align1 1Q compacted }; mad(8) g104<1>F g6.1<0,1,0>F g6.0<0,1,0>F g103<4,4,1>F { align16 1Q }; mov(1) g42.2<1>UD 0x00000018UD { align1 WE_all 1N compacted }; cmp.l.f0(8) g101<1>F g100<8,8,1>F 0x3f7d70a4F /* 0.99F */ { align1 1Q }; shr(8) g22<1>UD g105<8,8,1>UD 0x00000001UD { align1 1Q compacted }; send(16) g19<1>UD g42<0,1,0>UD 0x02280307 const MsgDesc: (7, 3, 0, 0) mlen 1 rlen 2 { align1 WE_all 1H }; mov(8) g106<1>UD g104<8,8,1>F { align1 1Q compacted }; shr(8) g34<1>UD g106<8,8,1>UD 0x00000001UD { align1 1Q compacted }; mov(8) g6<1>UD g20.2<0,1,0>F { align1 1Q compacted }; shr(8) g35<1>UD g6<8,8,1>UD 0x00000001UD { align1 1Q compacted }; mul(8) g36<1>D g34<8,8,1>D g35<8,8,1>D { align1 1Q compacted }; add(8) g37<1>D g36<8,8,1>D g22<8,8,1>D { align1 1Q compacted }; shl(8) g38<1>D g37<8,8,1>D 0x00000002UD { align1 1Q }; shl(8) g39<1>D g38<8,8,1>D 0x00000003UD { align1 1Q }; bfi2(8) g40<1>UD g51<4,4,1>UD g105<4,4,1>UD g39<4,4,1>UD { align16 1Q }; bfi2(8) g42<1>UD g41<4,4,1>UD g106<4,4,1>UD g40<4,4,1>UD { align16 1Q }; shr(8) g43<1>UD g42<8,8,1>UD 0x00000002UD { align1 1Q compacted }; shl(8) g19<1>D g43<8,8,1>D 0x00000002UD { align1 1Q }; add(8) g45<1>D g43<8,8,1>D 1D { align1 1Q compacted }; send(8) g44<1>UW g19<8,8,1>UD 0x02106e0e dp data 1 MsgDesc: ( untyped surface read, Surface = 14, SIMD8, Mask = 0xe) mlen 1 rlen 1 { align1 1Q }; shl(8) g35<1>D g45<8,8,1>D 0x00000002UD { align1 1Q }; send(8) g46<1>UW g35<8,8,1>UD 0x02106e0e dp data 1 MsgDesc: ( untyped surface read, Surface = 14, SIMD8, Mask = 0xe) mlen 1 rlen 1 { align1 1Q }; cmp.z.f0(8) null<1>F g46<8,8,1>F g25<8,8,1>F { align1 1Q compacted }; (-f0) sel(8) g51<1>UD g50<8,8,1>UD 0x3f800000UD { align1 1Q }; cmp.z.f0(8) null<1>F g44<8,8,1>F g25<8,8,1>F { align1 1Q compacted }; (-f0) sel(8) g52<1>UD g51<8,8,1>UD 0x00000000UD { align1 1Q }; cmp.ge.f0(8) g53<1>F g52<8,8,1>F 0x0F /* 0F */ { align1 1Q compacted }; cmp.g.f0(8) g54<1>F g18<8,8,1>F 0x3ca3d70aF /* 0.02F */ { align1 1Q }; and.nz.f0(8) null<1>UD g54<8,8,1>UD g53<8,8,1>UD { align1 1Q compacted }; (+f0) if(8) JIP: 1776 UIP: 1776 { align1 1Q }; END B8 ->B9 ->B16 START B9 <-B8 (699 cycles) mul(8) g39<1>F g32<8,8,1>F g9.6<0,1,0>F { align1 1Q compacted }; mul(8) g40<1>F g33<8,8,1>F g9.7<0,1,0>F { align1 1Q compacted }; mul(8) g41<1>F g32<8,8,1>F g9.4<0,1,0>F { align1 1Q compacted }; mul(8) g42<1>F g33<8,8,1>F g9.5<0,1,0>F { align1 1Q compacted }; mul(8) g43<1>F g32<8,8,1>F g10.2<0,1,0>F { align1 1Q compacted }; mul(8) g44<1>F g33<8,8,1>F g10.3<0,1,0>F { align1 1Q compacted }; mul(8) g66<1>F g79<8,8,1>F g79<8,8,1>F { align1 1Q compacted }; mul(8) g73<1>F g27<8,8,1>F g27<8,8,1>F { align1 1Q compacted }; add(8) g81<1>F (abs)g30<8,8,1>F (abs)g29<8,8,1>F { align1 1Q compacted }; mov(1) g22<1>F 0x3d981627F /* 0.074261F */ { align1 WE_all 1N }; add(8) g37<1>F -g18<8,8,1>F 0x3f800000F /* 1F */ { align1 1Q }; mov(8) g25<1>F g12<0,1,0>F { align1 1Q compacted }; mov(8) g104<1>UD 0x0000003fUD { align1 1Q compacted }; mov(8) 
g107<1>UD g52<8,8,1>F { align1 1Q compacted }; shl(8) g108<1>D g38<8,8,1>D 0x00000001UD { align1 1Q }; send(8) g39<1>UW g39<0,1,0>F 0x04220104 sampler MsgDesc: sample SIMD8 Surface = 4 Sampler = 1 mlen 2 rlen 2 { align1 1Q }; send(8) g32<1>UW g41<0,1,0>F 0x04320205 sampler MsgDesc: sample SIMD8 Surface = 5 Sampler = 2 mlen 2 rlen 3 { align1 1Q }; send(8) g6<1>UW g43<0,1,0>F 0x04120306 sampler MsgDesc: sample SIMD8 Surface = 6 Sampler = 3 mlen 2 rlen 1 { align1 1Q }; mad(8) g67<1>F g66<4,4,1>F g77<4,4,1>F g77<4,4,1>F { align16 1Q compacted }; mad(8) g74<1>F g73<4,4,1>F g26<4,4,1>F g26<4,4,1>F { align16 1Q compacted }; add(8) g82<1>F g81<8,8,1>F (abs)g31<8,8,1>F { align1 1Q compacted }; mov(1) g22.1<1>F 0x3c996e30F /* 0.0187293F */ { align1 WE_all 1N }; sel.l(8) g38<1>F g37<8,8,1>F 0x3f800000F /* 1F */ { align1 1Q }; bfi2(8) g109<1>UD g49<4,4,1>UD g105<4,4,1>UD g108<4,4,1>UD { align16 1Q }; math inv(8) g83<1>F g82<8,8,1>F null<8,8,1>F { align1 1Q compacted }; mad(8) g68<1>F g67<4,4,1>F g116<4,4,1>F g116<4,4,1>F { align16 1Q compacted }; mad(8) g75<1>F g74<4,4,1>F g28<4,4,1>F g28<4,4,1>F { align16 1Q compacted }; bfi2(8) g110<1>UD g47<4,4,1>UD g106<4,4,1>UD g109<4,4,1>UD { align16 1Q }; mul.le.f0(8) g84<1>F g29<8,8,1>F g83<8,8,1>F { align1 1Q compacted }; mul(8) g85<1>F g30<8,8,1>F g83<8,8,1>F { align1 1Q compacted }; math rsq(8) g69<1>F g68<8,8,1>F null<8,8,1>F { align1 1Q compacted }; math rsq(8) g76<1>F g75<8,8,1>F null<8,8,1>F { align1 1Q compacted }; add(8) g87<1>F -(abs)g84<8,8,1>F 0x3f800000F /* 1F */ { align1 1Q }; (-f0) sel(8) g88<1>UD g50<8,8,1>UD 0x3f800000UD { align1 1Q }; add(8) g86<1>F -(abs)g85<8,8,1>F 0x3f800000F /* 1F */ { align1 1Q }; mul(8) g70<1>F g69<8,8,1>F g77<8,8,1>F { align1 1Q compacted }; mul(8) g71<1>F g69<8,8,1>F g79<8,8,1>F { align1 1Q compacted }; mul(8) g72<1>F g69<8,8,1>F g116<8,8,1>F { align1 1Q compacted }; mul(8) g21<1>F g76<8,8,1>F g26<8,8,1>F { align1 1Q compacted }; mul(8) g78<1>F g76<8,8,1>F g27<8,8,1>F { align1 1Q compacted }; mul(8) g23<1>F g76<8,8,1>F g28<8,8,1>F { align1 1Q compacted }; cmp.ge.f0(8) null<1>F -g85<8,8,1>F 0x0F /* 0F */ { align1 1Q compacted }; mul(8) g90<1>F g86<8,8,1>F g88<8,8,1>F { align1 1Q compacted }; mul(8) g94<1>F g30<8,8,1>F g71<8,8,1>F { align1 1Q compacted }; (-f0) sel(8) g89<1>UD g50<8,8,1>UD 0x3f800000UD { align1 1Q }; csel.le(8) g92<1>F g90<4,4,1>F -g84<4,4,1>F -g31<4,4,1>F { align16 1Q }; mad(8) g95<1>F -g94<4,4,1>F g70<4,4,1>F -g29<4,4,1>F { align16 1Q }; mul(8) g91<1>F g87<8,8,1>F g89<8,8,1>F { align1 1Q compacted }; mad(8) g96<1>F g95<4,4,1>F g72<4,4,1>F -g31<4,4,1>F { align16 1Q compacted }; csel.le(8) g93<1>F g91<4,4,1>F -g85<4,4,1>F -g31<4,4,1>F { align16 1Q }; mad(8) g97<1>F g70<4,4,1>F -g29<4,4,1>F -g96<4,4,1>F { align16 1Q }; mad(8) g98<1>F g71<4,4,1>F -g30<4,4,1>F -g96<4,4,1>F { align16 1Q }; mad(8) g99<1>F g72<4,4,1>F -g31<4,4,1>F -g96<4,4,1>F { align16 1Q }; mul(8) g100<1>F g98<8,8,1>F g98<8,8,1>F { align1 1Q compacted }; mad(8) g102<1>F g100<4,4,1>F g97<4,4,1>F g97<4,4,1>F { align16 1Q compacted }; mad(8) g103<1>F g102<4,4,1>F g99<4,4,1>F g99<4,4,1>F { align16 1Q compacted }; mul(8) g55<1>F g39<8,8,1>F g9<0,1,0>F { align1 1Q compacted }; mul(8) g56<1>F g40<8,8,1>F g9.1<0,1,0>F { align1 1Q compacted }; mul(8) g39<1>F g38<8,8,1>F 0x42fe0000F /* 127F */ { align1 1Q }; mul.sat(8) g80<1>F g6<8,8,1>F g10.7<0,1,0>F { align1 1Q compacted }; add(8) g6<1>D g107<8,8,1>D g110<8,8,1>D { align1 1Q compacted }; mul(8) g57<1>F g56<8,8,1>F g7.4<0,1,0>F { align1 1Q compacted }; mul(8) g58<1>F g56<8,8,1>F g7.5<0,1,0>F { 
align1 1Q compacted }; mul(8) g59<1>F g56<8,8,1>F g7.6<0,1,0>F { align1 1Q compacted }; mov(8) g40<1>UD g39<8,8,1>F { align1 1Q compacted }; mul(8) g63<1>F g80<8,8,1>F 0x427c0000F /* 63F */ { align1 1Q }; mad(8) g60<1>F g57<4,4,1>F g7.0<0,1,0>F g32<4,4,1>F { align16 1Q }; mad(8) g61<1>F g58<4,4,1>F g7.1<0,1,0>F g33<4,4,1>F { align16 1Q }; mad(8) g62<1>F g59<4,4,1>F g7.2<0,1,0>F g34<4,4,1>F { align16 1Q }; shl(8) g41<1>D g40<8,8,1>D 0x00000001UD { align1 1Q }; mov(8) g66<1>UD g63<8,8,1>F { align1 1Q compacted }; mad.sat(8) g63<1>F g60<4,4,1>F g8.0<0,1,0>F g55<4,4,1>F { align16 1Q }; and(8) g42<1>UD g41<8,8,1>UD 0x000000fcUD { align1 1Q compacted }; mad.sat(8) g64<1>F g61<4,4,1>F g8.1<0,1,0>F g55<4,4,1>F { align16 1Q }; shl(8) g67<1>D g66<8,8,1>D 0x00000006UD { align1 1Q }; mad.sat(8) g65<1>F g62<4,4,1>F g8.2<0,1,0>F g55<4,4,1>F { align16 1Q }; mul(8) g45<1>F g63<8,8,1>F 0x42fe0000F /* 127F */ { align1 1Q }; mul(8) g46<1>F g64<8,8,1>F 0x42fe0000F /* 127F */ { align1 1Q }; mul(8) g47<1>F g65<8,8,1>F 0x427c0000F /* 63F */ { align1 1Q }; mov(8) g49<1>UD g45<8,8,1>F { align1 1Q compacted }; mov(8) g50<1>UD g46<8,8,1>F { align1 1Q compacted }; mov(8) g51<1>UD g47<8,8,1>F { align1 1Q compacted }; shl(8) g54<1>D g49<8,8,1>D 0x00000019UD { align1 1Q }; shl(8) g52<1>D g50<8,8,1>D 0x00000012UD { align1 1Q }; shl(8) g53<1>D g51<8,8,1>D 0x0000000cUD { align1 1Q }; add(8) g55<1>D g54<8,8,1>D g52<8,8,1>D { align1 1Q compacted }; add(8) g56<1>D g55<8,8,1>D g53<8,8,1>D { align1 1Q compacted }; add(8) g68<1>D g56<8,8,1>D g67<8,8,1>D { align1 1Q compacted }; bfi2(8) g9<1>UD g104<4,4,1>UD g25<4,4,1>UD g68<4,4,1>UD { align16 1Q }; math rsq(8) g104<1>F g103<8,8,1>F null<8,8,1>F { align1 1Q compacted }; mul(8) g105<1>F g104<8,8,1>F g97<8,8,1>F { align1 1Q compacted }; mul(8) g106<1>F g104<8,8,1>F g98<8,8,1>F { align1 1Q compacted }; mul(8) g107<1>F g104<8,8,1>F g99<8,8,1>F { align1 1Q compacted }; mul(8) g110<1>F g30<8,8,1>F g105<8,8,1>F { align1 1Q compacted }; mul(8) g108<1>F g31<8,8,1>F g106<8,8,1>F { align1 1Q compacted }; mul(8) g114<1>F g78<8,8,1>F g106<8,8,1>F { align1 1Q compacted }; mul(8) g109<1>F g29<8,8,1>F g107<8,8,1>F { align1 1Q compacted }; mad(8) g113<1>F g110<4,4,1>F g106<4,4,1>F -g29<4,4,1>F { align16 1Q compacted }; mad(8) g111<1>F g108<4,4,1>F g107<4,4,1>F -g30<4,4,1>F { align16 1Q compacted }; mad(8) g115<1>F g114<4,4,1>F g105<4,4,1>F g21<4,4,1>F { align16 1Q compacted }; mad(8) g112<1>F g109<4,4,1>F g105<4,4,1>F -g31<4,4,1>F { align16 1Q compacted }; mad(8) g24<1>F g115<4,4,1>F g107<4,4,1>F g23<4,4,1>F { align16 1Q compacted }; mul(8) g117<1>F g78<8,8,1>F g112<8,8,1>F { align1 1Q compacted }; mad(8) g118<1>F g117<4,4,1>F g111<4,4,1>F g21<4,4,1>F { align16 1Q compacted }; add(8) g120<1>F -(abs)g24<8,8,1>F 0x3f800000F /* 1F */ { align1 1Q }; mad(8) g122<1>F g22.0<0,1,0>F -g22.1<0,1,0>F (abs)g24<4,4,1>F { align16 1Q }; mov(1) g22.2<1>F 0x3e593484F /* 0.212114F */ { align1 WE_all 1N }; math sqrt(8) g121<1>F g120<8,8,1>F null<8,8,1>F { align1 1Q compacted }; mad(8) g119<1>F g118<4,4,1>F g113<4,4,1>F g23<4,4,1>F { align16 1Q compacted }; mad(8) g123<1>F -g22.2<0,1,0>F (abs)g24<4,4,1>F g122<4,4,1>F { align16 1Q }; mov(1) g22.3<1>F 0x3fc90da4F /* 1.57073F */ { align1 WE_all 1N }; mul(8) g125<1>F g121<8,8,1>F 0x3f22f984F /* 0.63662F */ { align1 1Q }; cmp.ge.f0(8) g44<1>F g119<8,8,1>F 0x0F /* 0F */ { align1 1Q compacted }; cmp.z.f0(8) g111<1>D g101<8,8,1>D 0D { align1 1Q compacted }; mad(8) g124<1>F g22.3<0,1,0>F (abs)g24<4,4,1>F g123<4,4,1>F { align16 1Q }; mov(1) g22.4<1>F 0x3f000000F /* 0.5F */ { 
align1 WE_all 1N }; mad.sat(8) g79<1>F g22.4<0,1,0>F g22.4<0,1,0>F g92<4,4,1>F { align16 1Q }; mad.sat(8) g25<1>F g22.4<0,1,0>F g22.4<0,1,0>F g93<4,4,1>F { align16 1Q }; mul(8) g126<1>F g125<8,8,1>F g124<8,8,1>F { align1 1Q compacted }; sel.l(8) g127<1>F g126<8,8,1>F 0x3f800000F /* 1F */ { align1 1Q }; mul(8) g26<1>F g79<8,8,1>F 0x43ff8000F /* 511F */ { align1 1Q }; mul(8) g27<1>F g25<8,8,1>F 0x43ff8000F /* 511F */ { align1 1Q }; mul(8) g10<1>F g127<8,8,1>F 0x427c0000F /* 63F */ { align1 1Q }; mov(8) g28<1>UD g26<8,8,1>F { align1 1Q compacted }; mov(8) g29<1>UD g27<8,8,1>F { align1 1Q compacted }; mov(8) g77<1>UD g10<8,8,1>F { align1 1Q compacted }; shl(8) g30<1>D g28<8,8,1>D 0x00000011UD { align1 1Q }; shl(8) g31<1>D g29<8,8,1>D 0x00000008UD { align1 1Q }; shl(8) g32<1>D g77<8,8,1>D 0x0000001aUD { align1 1Q }; add(8) g33<1>D g32<8,8,1>D g30<8,8,1>D { align1 1Q compacted }; add(8) g34<1>D g33<8,8,1>D g31<8,8,1>D { align1 1Q compacted }; add(8) g43<1>D g34<8,8,1>D g42<8,8,1>D { align1 1Q compacted }; bfi2(8) g8<1>UD g48<4,4,1>UD g44<4,4,1>UD g43<4,4,1>UD { align16 1Q }; (+f0) if(8) JIP: 64 UIP: 64 { align1 1Q }; END B9 ->B10 ->B11 START B10 <-B9 (20 cycles) shl(8) g7<1>D g6<8,8,1>D 0x00000004UD { align1 1Q }; mov(1) f1<1>UW f0.1<0,1,0>UW { align1 WE_all 1N }; (+f1) send(8) null<1>UW g7<8,8,1>UD 0x06026c0d dp data 1 MsgDesc: ( DC untyped surface write, Surface = 13, SIMD8, Mask = 0xc) mlen 3 rlen 0 { align1 1Q }; END B10 ->B11 START B11 <-B9 <-B10 (20 cycles) endif(8) JIP: 192 { align1 1Q }; mov.nz.f0(8) null<1>D g111<8,8,1>D { align1 1Q }; (+f0) if(8) JIP: 56 UIP: 56 { align1 1Q }; END B11 ->B12 ->B13 START B12 <-B11 (20 cycles) mov(8) g20<1>UD g12.1<0,1,0>UD { align1 1Q compacted }; mov(1) f1<1>UW f0.1<0,1,0>UW { align1 WE_all 1N }; (+f1) send(8) null<1>UW g19<8,8,1>UD 0x04026e0c dp data 1 MsgDesc: ( DC untyped surface write, Surface = 12, SIMD8, Mask = 0xe) mlen 2 rlen 0 { align1 1Q }; END B12 ->B13 START B13 <-B11 <-B12 (20 cycles) endif(8) JIP: 104 { align1 1Q }; mov.nz.f0(8) null<1>D g111<8,8,1>D { align1 1Q }; (+f0) if(8) JIP: 56 UIP: 56 { align1 1Q }; END B13 ->B14 ->B15 START B14 <-B13 (20 cycles) mov(8) g36<1>UD g12.2<0,1,0>UD { align1 1Q compacted }; mov(1) f1<1>UW f0.1<0,1,0>UW { align1 WE_all 1N }; (+f1) send(8) null<1>UW g35<8,8,1>UD 0x04026e0c dp data 1 MsgDesc: ( DC untyped surface write, Surface = 12, SIMD8, Mask = 0xe) mlen 2 rlen 0 { align1 1Q }; END B14 ->B15 START B15 <-B13 <-B14 (2 cycles) endif(8) JIP: 16 { align1 1Q }; END B15 ->B16 START B16 <-B8 <-B15 (222 cycles) endif(8) JIP: 16 { align1 1Q }; mul(8) g114<1>F g18<8,8,1>F g11<0,1,0>F { align1 1Q compacted }; mul(8) g115<1>F g18<8,8,1>F g11.1<0,1,0>F { align1 1Q compacted }; mul(8) g24<1>F g18<8,8,1>F g11.2<0,1,0>F { align1 1Q compacted }; add(8) g117<1>F -g116<8,8,1>F 0xc1f00000F /* -30F */ { align1 1Q }; mov.sat(8) g119<1>F g5<8,8,1>F { align1 1Q compacted }; mov(1) g22.5<1>F 0x3f800000F /* 1F */ { align1 WE_all 1N }; (+f0.1) cmp.z.f0.1(8) null<1>D g101<8,8,1>D 0D { align1 1Q }; mul(8) g15<1>F g114<8,8,1>F g11.3<0,1,0>F { align1 1Q compacted }; mul(8) g16<1>F g115<8,8,1>F g11.3<0,1,0>F { align1 1Q compacted }; mul(8) g17<1>F g24<8,8,1>F g11.3<0,1,0>F { align1 1Q compacted }; sel.ge(8) g118<1>F g117<8,8,1>F 0x0F /* 0F */ { align1 1Q compacted }; mul(8) g120<1>F g118<8,8,1>F 0x3c888889F /* 0.0166667F */ { align1 1Q }; mad(8) g121<1>F g22.5<0,1,0>F g120<4,4,1>F -g120<4,4,1>F { align16 1Q }; sel.ge(8) g122<1>F g121<8,8,1>F 0x0F /* 0F */ { align1 1Q compacted }; mul(8) g7<1>F g122<8,8,1>F g119<8,8,1>F { align1 1Q 
compacted }; mov(16) g13<1>UD g0<8,8,1>UD { align1 WE_all 1H compacted }; mov(1) g14.14<1>UW f0.1<0,1,0>UW { align1 WE_all 1N }; (+f0.1) sendc(8) null<1>UW g13<0,1,0>F 0x0c0b0400 render MsgDesc: RT write SIMD8 Surface = 0 mlen 6 rlen 0 { align1 1Q }; mov(16) g2<1>UD g0<8,8,1>UD { align1 WE_all 1H compacted }; mov(1) g2.2<1>UD 0x00000001UD { align1 WE_all 1N compacted }; mov(1) g3.14<1>UW f0.1<0,1,0>UW { align1 WE_all 1N }; mov(8) g4<1>F -g116<8,8,1>F { align1 1Q compacted }; mov(8) g5<1>F 0x477fe000F /* 65504F */ { align1 1Q }; mov(8) g6<1>F 0x477fe000F /* 65504F */ { align1 1Q }; (+f0.1) sendc(8) null<1>UW g2<0,1,0>F 0x0c0b0401 render MsgDesc: RT write SIMD8 Surface = 1 mlen 6 rlen 0 { align1 1Q }; mov(16) g122<1>UD g0<8,8,1>UD { align1 WE_all 1H compacted }; mov(1) g122.2<1>UD 0x00000002UD { align1 WE_all 1N compacted }; mov(1) g123.14<1>UW f0.1<0,1,0>UW { align1 WE_all 1N }; mov(8) g124<1>F 0x0VF /* [0F, 0F, 0F, 0F]VF */ { align1 1Q compacted }; mov(8) g125<1>F 0x0VF /* [0F, 0F, 0F, 0F]VF */ { align1 1Q compacted }; mov(8) g126<1>F 0x0VF /* [0F, 0F, 0F, 0F]VF */ { align1 1Q compacted }; mov(8) g127<1>F 0x0VF /* [0F, 0F, 0F, 0F]VF */ { align1 1Q compacted }; (+f0.1) sendc(8) null<1>UW g122<0,1,0>F 0x8c0b1402 render MsgDesc: RT write SIMD8 LastRT Surface = 2 mlen 6 rlen 0 { align1 1Q EOT }; END B16

Native code for unnamed fragment shader GLSL1
SIMD16 shader: 326 instructions. 2 loops. 15698 cycles. 0:0 spills:fills. Promoted 6 constants. Compacted 5216 to 3680 bytes (29%)
START B0 (338 cycles) add(32) g125<1>UW g1.4<1,4,0>UW 0x11001010V { align1 WE_all }; mov(16) g85<1>F g125<8,4,1>UW { align1 1H }; mov(16) g41<1>F g125.4<8,4,1>UW { align1 1H }; mov(1) f0.1<1>UW g1.14<0,1,0>UW { align1 WE_all 1N }; pln(16) g67<1>F g17<0,1,0>F g2<8,8,1>F { align1 1H compacted }; pln(16) g65<1>F g17.4<0,1,0>F g2<8,8,1>F { align1 1H compacted }; pln(16) g75<1>F g18<0,1,0>F g2<8,8,1>F { align1 1H compacted }; pln(16) g43<1>F g26.4<0,1,0>F g2<8,8,1>F { align1 1H compacted }; mov(16) g83<1>F g6<8,8,1>F { align1 1H compacted }; pln(16) g59<1>F g21<0,1,0>F g2<8,8,1>F { align1 1H compacted }; pln(16) g61<1>F g21.4<0,1,0>F g2<8,8,1>F { align1 1H compacted }; pln(16) g63<1>F g22<0,1,0>F g2<8,8,1>F { align1 1H compacted }; pln(16) g55<1>F g23<0,1,0>F g2<8,8,1>F { align1 1H compacted }; pln(16) g53<1>F g23.4<0,1,0>F g2<8,8,1>F { align1 1H compacted }; pln(16) g69<1>F g24<0,1,0>F g2<8,8,1>F { align1 1H compacted }; mov(16) g71<1>UD 0x00000004UD { align1 1H compacted }; mov(16) g73<1>UD 0x00000001UD { align1 1H compacted }; mov(16) g57<1>UD 0x00000002UD { align1 1H compacted }; mov(16) g77<1>D -1082130432D { align1 1H }; mov(16) g45<1>UD 0x00000008UD { align1 1H compacted }; pln(16) g35<1>F g19<0,1,0>F g2<8,8,1>F { align1 1H compacted }; pln(16) g79<1>F g19.4<0,1,0>F g2<8,8,1>F { align1 1H compacted }; mov(16) g49<1>UD 0x00000000UD { align1 1H compacted }; mov(16) g47<1>D 1065353216D { align1 1H }; mul(16) g17<1>F g35<8,8,1>F g14<0,1,0>F { align1 1H compacted }; mul(16) g19<1>F g79<8,8,1>F g14.1<0,1,0>F { align1 1H compacted }; send(16) g2<1>UW g17<0,1,0>F 0x08840003 sampler MsgDesc: sample SIMD16 Surface = 3 Sampler = 0 mlen 4 rlen 8 { align1 1H }; sel.l(16) g25<1>F g8<8,8,1>F 0x3f800000F /* 1F */ { align1 1H }; END B0 ->B1 START B2 <-B1 <-B3 (262 cycles) mov(16) g126<1>F g49<8,8,1>D { align1 1H compacted }; END B1 ->B2 ->B4 mov(8) g27<1>UD g0<8,8,1>UD { align1 WE_all 1Q compacted }; mov(1) g27.2<1>UD 0x00000088UD { align1 WE_all 1N compacted }; send(16) g81<1>UD g27<0,1,0>UD 0x0228030b const MsgDesc: (11, 3, 0, 0)
mlen 1 rlen 2 { align1 WE_all 1H }; cmp.ge.f0(16) null<1>F g126<8,8,1>F g81.2<0,1,0>F { align1 1H compacted }; (+f0) break(16) JIP: 344 UIP: 344 { align1 1H }; END B2 ->B1 ->B3 START B3 <-B2 (452 cycles) add(16) g27<1>D g49<8,8,1>D 32D { align1 1H compacted }; mov(8) g51<1>UD g0<8,8,1>UD { align1 WE_all 1Q compacted }; add(16) g49<1>D g49<8,8,1>D 1D { align1 1H compacted }; shl(16) g100<1>D g27<8,8,1>D 0x00000004UD { align1 1H }; mov(1) g51.2<1>UD 0x00000080UD { align1 WE_all 1N compacted }; send(16) g27<1>UW g100<0,1,0>UD 0x0484700b sampler MsgDesc: ld SIMD16 Surface = 11 Sampler = 0 mlen 2 rlen 8 { align1 1H }; send(16) g91<1>UD g51<0,1,0>UD 0x0228030b const MsgDesc: (11, 3, 0, 0) mlen 1 rlen 2 { align1 WE_all 1H }; math inv(16) g89<1>F (abs)g33<8,8,1>F null<8,8,1>F { align1 1H compacted }; mul(16) g51<1>F g29<8,8,1>F g91.4<0,1,0>F { align1 1H compacted }; mul(16) g37<1>F g29<8,8,1>F g91.5<0,1,0>F { align1 1H compacted }; mul(16) g39<1>F g29<8,8,1>F g91.6<0,1,0>F { align1 1H compacted }; mad(16) g87<1>F g51<4,4,1>F g91.0<0,1,0>F g27<4,4,1>F { align16 1H }; mad(16) g51<1>F g37<4,4,1>F g91.1<0,1,0>F g27<4,4,1>F { align16 1H }; mad(16) g37<1>F g39<4,4,1>F g91.2<0,1,0>F g27<4,4,1>F { align16 1H }; mad(16) g39<1>F g87<4,4,1>F g92.0<0,1,0>F g31<4,4,1>F { align16 1H }; mad(16) g87<1>F g51<4,4,1>F g92.1<0,1,0>F g31<4,4,1>F { align16 1H }; mad(16) g93<1>F g37<4,4,1>F g92.2<0,1,0>F g31<4,4,1>F { align16 1H }; add(16) g28<1>F g39<8,8,1>F g92.4<0,1,0>F { align1 1H compacted }; add(16) g30<1>F g87<8,8,1>F g92.5<0,1,0>F { align1 1H compacted }; add(16) g32<1>F g93<8,8,1>F g92.6<0,1,0>F { align1 1H compacted }; add(16) g51<1>F -g28<8,8,1>F g67<8,8,1>F { align1 1H compacted }; add(16) g37<1>F -g30<8,8,1>F g65<8,8,1>F { align1 1H compacted }; add(16) g39<1>F -g32<8,8,1>F g75<8,8,1>F { align1 1H compacted }; mul(16) g87<1>F g51<8,8,1>F g89<8,8,1>F { align1 1H compacted }; mul(16) g91<1>F g37<8,8,1>F g89<8,8,1>F { align1 1H compacted }; mul(16) g93<1>F g39<8,8,1>F g89<8,8,1>F { align1 1H compacted }; mul(16) g95<1>F g91<8,8,1>F g91<8,8,1>F { align1 1H compacted }; mad(16) g97<1>F g95<4,4,1>F g87<4,4,1>F g87<4,4,1>F { align16 1H compacted }; mad(16) g99<1>F g97<4,4,1>F g93<4,4,1>F g93<4,4,1>F { align16 1H compacted }; sel.l(16) g101<1>F g99<8,8,1>F 0x3f800000F /* 1F */ { align1 1H }; sel.l(16) g47<1>F g101<8,8,1>F g47<8,8,1>F { align1 1H compacted }; while(16) JIP: -392 { align1 1H }; END B3 ->B2 START B4 <-B1 (8 cycles) mov(16) g51<1>UD 0x00000000UD { align1 1H compacted }; mov(16) g49<1>UD 0x00000000UD { align1 1H compacted }; END B4 ->B5 START B6 <-B5 <-B7 (40 cycles) mov(16) g102<1>F g51<8,8,1>D { align1 1H compacted }; END B5 ->B6 ->B8 cmp.ge.f0(16) null<1>F g102<8,8,1>F g81<0,1,0>F { align1 1H compacted }; (+f0) break(16) JIP: 376 UIP: 376 { align1 1H }; END B6 ->B5 ->B7 START B7 <-B6 (522 cycles) shl(16) g101<1>D g51<8,8,1>D 0x00000004UD { align1 1H }; mov(8) g37<1>UD g0<8,8,1>UD { align1 WE_all 1Q compacted }; add(16) g51<1>D g51<8,8,1>D 1D { align1 1H compacted }; send(16) g27<1>UW g101<0,1,0>UD 0x0484700b sampler MsgDesc: ld SIMD16 Surface = 11 Sampler = 0 mlen 2 rlen 8 { align1 1H }; mov(1) g37.2<1>UD 0x00000080UD { align1 WE_all 1N compacted }; send(16) g93<1>UD g37<0,1,0>UD 0x0228030b const MsgDesc: (11, 3, 0, 0) mlen 1 rlen 2 { align1 WE_all 1H }; math inv(16) g91<1>F g33<8,8,1>F null<8,8,1>F { align1 1H compacted }; mul(16) g37<1>F g29<8,8,1>F g93.4<0,1,0>F { align1 1H compacted }; mul(16) g39<1>F g29<8,8,1>F g93.5<0,1,0>F { align1 1H compacted }; mul(16) g87<1>F g29<8,8,1>F g93.6<0,1,0>F { 
align1 1H compacted }; mad(16) g89<1>F g37<4,4,1>F g93.0<0,1,0>F g27<4,4,1>F { align16 1H }; mad(16) g37<1>F g39<4,4,1>F g93.1<0,1,0>F g27<4,4,1>F { align16 1H }; mad(16) g39<1>F g87<4,4,1>F g93.2<0,1,0>F g27<4,4,1>F { align16 1H }; mad(16) g87<1>F g89<4,4,1>F g94.0<0,1,0>F g31<4,4,1>F { align16 1H }; mad(16) g89<1>F g37<4,4,1>F g94.1<0,1,0>F g31<4,4,1>F { align16 1H }; mad(16) g37<1>F g39<4,4,1>F g94.2<0,1,0>F g31<4,4,1>F { align16 1H }; add(16) g103<1>F g87<8,8,1>F g94.4<0,1,0>F { align1 1H compacted }; add(16) g105<1>F g89<8,8,1>F g94.5<0,1,0>F { align1 1H compacted }; add(16) g107<1>F g37<8,8,1>F g94.6<0,1,0>F { align1 1H compacted }; add(16) g109<1>F -g103<8,8,1>F g67<8,8,1>F { align1 1H compacted }; add(16) g111<1>F -g105<8,8,1>F g65<8,8,1>F { align1 1H compacted }; add(16) g113<1>F -g107<8,8,1>F g75<8,8,1>F { align1 1H compacted }; mul(16) g115<1>F g109<8,8,1>F g91<8,8,1>F { align1 1H compacted }; mul(16) g117<1>F g111<8,8,1>F g91<8,8,1>F { align1 1H compacted }; mul(16) g119<1>F g113<8,8,1>F g91<8,8,1>F { align1 1H compacted }; mul(16) g121<1>F g117<8,8,1>F g117<8,8,1>F { align1 1H compacted }; mad(16) g123<1>F g121<4,4,1>F g115<4,4,1>F g115<4,4,1>F { align16 1H compacted }; mad(16) g125<1>F g123<4,4,1>F g119<4,4,1>F g119<4,4,1>F { align16 1H compacted }; add(16) g27<1>F -g125<8,8,1>F 0x3f800000F /* 1F */ { align1 1H }; sel.ge(16) g29<1>F g27<8,8,1>F 0x0F /* 0F */ { align1 1H compacted }; add(16) g31<1>F g29<8,8,1>F g29<8,8,1>F { align1 1H compacted }; mul(16) g33<1>F g29<8,8,1>F g31<8,8,1>F { align1 1H compacted }; sel.l(16) g37<1>F g33<8,8,1>F 0x3f800000F /* 1F */ { align1 1H }; sel.ge(16) g49<1>F g49<8,8,1>F g37<8,8,1>F { align1 1H compacted }; while(16) JIP: -392 { align1 1H }; END B7 ->B6 START B8 <-B5 (1178 cycles) add(16) g38<1>F g47<8,8,1>F g49<8,8,1>F { align1 1H compacted }; add(16) g91<1>F g85<8,8,1>F 0x3f000000F /* 0.5F */ { align1 1H }; add(16) g93<1>F g41<8,8,1>F 0x3f000000F /* 0.5F */ { align1 1H }; mov(8) g98<1>UD g0<8,8,1>UD { align1 WE_all 1Q compacted }; mov(16) g87<1>UD 0x00000010UD { align1 1H compacted }; add.sat(16) g89<1>F g38<8,8,1>F g43<8,8,1>F { align1 1H compacted }; mov(16) g37<1>UD g91<8,8,1>F { align1 1H compacted }; mad(16) g95<1>F g10.1<0,1,0>F g10.0<0,1,0>F g93<4,4,1>F { align16 1H }; mov(1) g98.2<1>UD 0x00000018UD { align1 WE_all 1N compacted }; cmp.l.f0(16) g81<1>F g89<8,8,1>F 0x3f7d70a4F /* 0.99F */ { align1 1H }; shr(16) g100<1>UD g37<8,8,1>UD 0x00000001UD { align1 1H compacted }; mov(16) g39<1>UD g95<8,8,1>F { align1 1H compacted }; send(16) g96<1>UD g98<0,1,0>UD 0x02280307 const MsgDesc: (7, 3, 0, 0) mlen 1 rlen 2 { align1 WE_all 1H }; shr(16) g102<1>UD g39<8,8,1>UD 0x00000001UD { align1 1H compacted }; mov(16) g98<1>UD g97.2<0,1,0>F { align1 1H compacted }; shr(16) g104<1>UD g98<8,8,1>UD 0x00000001UD { align1 1H compacted }; mul(16) g106<1>D g102<8,8,1>D g104<8,8,1>D { align1 1H compacted }; add(16) g108<1>D g106<8,8,1>D g100<8,8,1>D { align1 1H compacted }; shl(16) g27<1>D g108<8,8,1>D 0x00000002UD { align1 1H }; shl(16) g109<1>D g27<8,8,1>D 0x00000003UD { align1 1H }; bfi2(16) g111<1>UD g45<4,4,1>UD g37<4,4,1>UD g109<4,4,1>UD { align16 1H }; bfi2(16) g113<1>UD g87<4,4,1>UD g39<4,4,1>UD g111<4,4,1>UD { align16 1H }; shr(16) g115<1>UD g113<8,8,1>UD 0x00000002UD { align1 1H compacted }; shl(16) g45<1>D g115<8,8,1>D 0x00000002UD { align1 1H }; add(16) g118<1>D g115<8,8,1>D 1D { align1 1H compacted }; send(16) g116<1>UW g45<8,8,1>UD 0x04205e0e dp data 1 MsgDesc: ( untyped surface read, Surface = 14, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H 
};
shl(16) g49<1>D g118<8,8,1>D 0x00000002UD { align1 1H };
send(16) g119<1>UW g49<8,8,1>UD 0x04205e0e dp data 1 MsgDesc: ( untyped surface read, Surface = 14, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H };
cmp.z.f0(16) null<1>F g119<8,8,1>F g83<8,8,1>F { align1 1H compacted };
(-f0) sel(16) g120<1>UD g77<8,8,1>UD 0x3f800000UD { align1 1H };
cmp.z.f0(16) null<1>F g116<8,8,1>F g83<8,8,1>F { align1 1H compacted };
(-f0) sel(16) g29<1>UD g120<8,8,1>UD 0x00000000UD { align1 1H };
cmp.ge.f0(16) g121<1>F g29<8,8,1>F 0x0F /* 0F */ { align1 1H compacted };
cmp.g.f0(16) g123<1>F g25<8,8,1>F 0x3ca3d70aF /* 0.02F */ { align1 1H };
and.nz.f0(16) null<1>UD g123<8,8,1>UD g121<8,8,1>UD { align1 1H compacted };
(+f0) if(16) JIP: 1776 UIP: 1776 { align1 1H };
END B8 ->B9 ->B16
START B9 <-B8 (934 cycles)
mul(16) g31<1>F g35<8,8,1>F g13.6<0,1,0>F { align1 1H compacted };
mul(16) g33<1>F g79<8,8,1>F g13.7<0,1,0>F { align1 1H compacted };
mul(16) g83<1>F g35<8,8,1>F g13.4<0,1,0>F { align1 1H compacted };
mul(16) g85<1>F g79<8,8,1>F g13.5<0,1,0>F { align1 1H compacted };
mul(16) g41<1>F g35<8,8,1>F g14.2<0,1,0>F { align1 1H compacted };
mul(16) g43<1>F g79<8,8,1>F g14.3<0,1,0>F { align1 1H compacted };
mul(16) g87<1>F g61<8,8,1>F g61<8,8,1>F { align1 1H compacted };
add(16) g89<1>F (abs)g53<8,8,1>F (abs)g55<8,8,1>F { align1 1H compacted };
mov(1) g10<1>F 0x3d981627F /* 0.074261F */ { align1 WE_all 1N };
add(16) g91<1>F -g25<8,8,1>F 0x3f800000F /* 1F */ { align1 1H };
mov(16) g99<1>F g16<0,1,0>F { align1 1H compacted };
mov(16) g93<1>UD 0x0000003fUD { align1 1H compacted };
mov(16) g95<1>UD g29<8,8,1>F { align1 1H compacted };
shl(16) g97<1>D g27<8,8,1>D 0x00000001UD { align1 1H };
mul(16) g79<1>F g65<8,8,1>F g65<8,8,1>F { align1 1H compacted };
mov(1) g10.1<1>F 0x3c996e30F /* 0.0187293F */ { align1 WE_all 1N };
send(16) g27<1>UW g31<0,1,0>F 0x08440104 sampler MsgDesc: sample SIMD16 Surface = 4 Sampler = 1 mlen 4 rlen 4 { align1 1H };
send(16) g31<1>UW g83<0,1,0>F 0x08640205 sampler MsgDesc: sample SIMD16 Surface = 5 Sampler = 2 mlen 4 rlen 6 { align1 1H };
send(16) g83<1>UW g41<0,1,0>F 0x08240306 sampler MsgDesc: sample SIMD16 Surface = 6 Sampler = 3 mlen 4 rlen 2 { align1 1H };
mad(16) g41<1>F g87<4,4,1>F g59<4,4,1>F g59<4,4,1>F { align16 1H compacted };
add(16) g43<1>F g89<8,8,1>F (abs)g69<8,8,1>F { align1 1H compacted };
mad(16) g85<1>F g79<4,4,1>F g67<4,4,1>F g67<4,4,1>F { align16 1H compacted };
bfi2(16) g87<1>UD g57<4,4,1>UD g37<4,4,1>UD g97<4,4,1>UD { align16 1H };
sel.l(16) g79<1>F g91<8,8,1>F 0x3f800000F /* 1F */ { align1 1H };
mul(16) g57<1>F g27<8,8,1>F g13<0,1,0>F { align1 1H compacted };
mul(16) g37<1>F g29<8,8,1>F g13.1<0,1,0>F { align1 1H compacted };
mad(16) g29<1>F g41<4,4,1>F g63<4,4,1>F g63<4,4,1>F { align16 1H compacted };
mul.sat(16) g27<1>F g83<8,8,1>F g14.7<0,1,0>F { align1 1H compacted };
bfi2(16) g41<1>UD g71<4,4,1>UD g39<4,4,1>UD g87<4,4,1>UD { align16 1H };
math inv(16) g83<1>F g43<8,8,1>F null<8,8,1>F { align1 1H compacted };
mad(16) g13<1>F g85<4,4,1>F g75<4,4,1>F g75<4,4,1>F { align16 1H compacted };
mul(16) g71<1>F g37<8,8,1>F g11.5<0,1,0>F { align1 1H compacted };
mul(16) g43<1>F g37<8,8,1>F g11.4<0,1,0>F { align1 1H compacted };
mul(16) g85<1>F g79<8,8,1>F 0x42fe0000F /* 127F */ { align1 1H };
mul(16) g79<1>F g37<8,8,1>F g11.6<0,1,0>F { align1 1H compacted };
mul(16) g39<1>F g53<8,8,1>F g83<8,8,1>F { align1 1H compacted };
mul(16) g37<1>F g27<8,8,1>F 0x427c0000F /* 63F */ { align1 1H };
mad(16) g97<1>F g71<4,4,1>F g11.1<0,1,0>F g33<4,4,1>F { align16 1H };
math rsq(16) g27<1>F g13<8,8,1>F null<8,8,1>F { align1 1H compacted };
mov(16) g87<1>UD g85<8,8,1>F { align1 1H compacted };
mad(16) g101<1>F g79<4,4,1>F g11.2<0,1,0>F g35<4,4,1>F { align16 1H };
math rsq(16) g13<1>F g29<8,8,1>F null<8,8,1>F { align1 1H compacted };
add(16) g117<1>F -(abs)g39<8,8,1>F 0x3f800000F /* 1F */ { align1 1H };
mov(16) g103<1>UD g37<8,8,1>F { align1 1H compacted };
mul.le.f0(16) g29<1>F g55<8,8,1>F g83<8,8,1>F { align1 1H compacted };
mul(16) g105<1>F g27<8,8,1>F g67<8,8,1>F { align1 1H compacted };
mul(16) g107<1>F g27<8,8,1>F g65<8,8,1>F { align1 1H compacted };
mul(16) g109<1>F g27<8,8,1>F g75<8,8,1>F { align1 1H compacted };
shl(16) g123<1>D g87<8,8,1>D 0x00000001UD { align1 1H };
add(16) g83<1>D g95<8,8,1>D g41<8,8,1>D { align1 1H compacted };
mul(16) g111<1>F g13<8,8,1>F g59<8,8,1>F { align1 1H compacted };
mul(16) g113<1>F g13<8,8,1>F g61<8,8,1>F { align1 1H compacted };
mul(16) g115<1>F g13<8,8,1>F g63<8,8,1>F { align1 1H compacted };
shl(16) g125<1>D g103<8,8,1>D 0x00000006UD { align1 1H };
mad.sat(16) g27<1>F g97<4,4,1>F g12.1<0,1,0>F g57<4,4,1>F { align16 1H };
add(16) g119<1>F -(abs)g29<8,8,1>F 0x3f800000F /* 1F */ { align1 1H };
(-f0) sel(16) g121<1>UD g77<8,8,1>UD 0x3f800000UD { align1 1H };
mad(16) g95<1>F g43<4,4,1>F g11.0<0,1,0>F g31<4,4,1>F { align16 1H };
mul(16) g35<1>F g53<8,8,1>F g107<8,8,1>F { align1 1H compacted };
and(16) g85<1>UD g123<8,8,1>UD 0x000000fcUD { align1 1H compacted };
mad.sat(16) g31<1>F g101<4,4,1>F g12.2<0,1,0>F g57<4,4,1>F { align16 1H };
mul(16) g43<1>F g27<8,8,1>F 0x42fe0000F /* 127F */ { align1 1H };
mul(16) g33<1>F g117<8,8,1>F g121<8,8,1>F { align1 1H compacted };
cmp.ge.f0(16) null<1>F -g39<8,8,1>F 0x0F /* 0F */ { align1 1H compacted };
mad.sat(16) g13<1>F g95<4,4,1>F g12.0<0,1,0>F g57<4,4,1>F { align16 1H };
mad(16) g95<1>F -g35<4,4,1>F g105<4,4,1>F -g55<4,4,1>F { align16 1H };
mov(16) g103<1>UD g43<8,8,1>F { align1 1H compacted };
mul(16) g67<1>F g31<8,8,1>F 0x427c0000F /* 63F */ { align1 1H };
csel.le(16) g90<1>F g33<4,4,1>F -g29<4,4,1>F -g69<4,4,1>F { align16 1H };
(-f0) sel(16) g88<1>UD g77<8,8,1>UD 0x3f800000UD { align1 1H };
mul(16) g41<1>F g13<8,8,1>F 0x42fe0000F /* 127F */ { align1 1H };
mad(16) g97<1>F g95<4,4,1>F g109<4,4,1>F -g69<4,4,1>F { align16 1H compacted };
mov(16) g117<1>UD g67<8,8,1>F { align1 1H compacted };
mul(16) g11<1>F g119<8,8,1>F g88<8,8,1>F { align1 1H compacted };
shl(16) g119<1>D g103<8,8,1>D 0x00000012UD { align1 1H };
mov(16) g101<1>UD g41<8,8,1>F { align1 1H compacted };
mad(16) g41<1>F g109<4,4,1>F -g69<4,4,1>F -g97<4,4,1>F { align16 1H };
shl(16) g121<1>D g117<8,8,1>D 0x0000000cUD { align1 1H };
mad(16) g33<1>F g105<4,4,1>F -g55<4,4,1>F -g97<4,4,1>F { align16 1H };
mad(16) g35<1>F g107<4,4,1>F -g53<4,4,1>F -g97<4,4,1>F { align16 1H };
csel.le(16) g13<1>F g11<4,4,1>F -g39<4,4,1>F -g69<4,4,1>F { align16 1H };
shl(16) g123<1>D g101<8,8,1>D 0x00000019UD { align1 1H };
mul(16) g43<1>F g35<8,8,1>F g35<8,8,1>F { align1 1H compacted };
add(16) g62<1>D g123<8,8,1>D g119<8,8,1>D { align1 1H compacted };
mad(16) g67<1>F g43<4,4,1>F g33<4,4,1>F g33<4,4,1>F { align16 1H compacted };
add(16) g71<1>D g62<8,8,1>D g121<8,8,1>D { align1 1H compacted };
add(16) g57<1>D g71<8,8,1>D g125<8,8,1>D { align1 1H compacted };
mad(16) g65<1>F g67<4,4,1>F g41<4,4,1>F g41<4,4,1>F { align16 1H compacted };
bfi2(16) g31<1>UD g93<4,4,1>UD g99<4,4,1>UD g57<4,4,1>UD { align16 1H };
math rsq(16) g59<1>F g65<8,8,1>F null<8,8,1>F { align1 1H compacted };
mul(16) g61<1>F g59<8,8,1>F g33<8,8,1>F { align1 1H compacted };
mul(16) g63<1>F g59<8,8,1>F g35<8,8,1>F { align1 1H compacted };
mul(16) g71<1>F g59<8,8,1>F g41<8,8,1>F { align1 1H compacted };
mul(16) g79<1>F g53<8,8,1>F g61<8,8,1>F { align1 1H compacted };
mul(16) g57<1>F g69<8,8,1>F g63<8,8,1>F { align1 1H compacted };
mul(16) g92<1>F g113<8,8,1>F g63<8,8,1>F { align1 1H compacted };
mul(16) g77<1>F g55<8,8,1>F g71<8,8,1>F { align1 1H compacted };
mad(16) g87<1>F g79<4,4,1>F g63<4,4,1>F -g55<4,4,1>F { align16 1H compacted };
mad(16) g37<1>F g57<4,4,1>F g71<4,4,1>F -g53<4,4,1>F { align16 1H compacted };
mad(16) g94<1>F g92<4,4,1>F g61<4,4,1>F g111<4,4,1>F { align16 1H compacted };
mad(16) g39<1>F g77<4,4,1>F g61<4,4,1>F -g69<4,4,1>F { align16 1H compacted };
mad(16) g96<1>F g94<4,4,1>F g71<4,4,1>F g115<4,4,1>F { align16 1H compacted };
mul(16) g98<1>F g113<8,8,1>F g39<8,8,1>F { align1 1H compacted };
mad(16) g100<1>F g98<4,4,1>F g37<4,4,1>F g111<4,4,1>F { align16 1H compacted };
add(16) g103<1>F -(abs)g96<8,8,1>F 0x3f800000F /* 1F */ { align1 1H };
mad(16) g107<1>F g10.0<0,1,0>F -g10.1<0,1,0>F (abs)g96<4,4,1>F { align16 1H };
mov(1) g10.2<1>F 0x3e593484F /* 0.212114F */ { align1 WE_all 1N };
math sqrt(16) g105<1>F g103<8,8,1>F null<8,8,1>F { align1 1H compacted };
mad(16) g102<1>F g100<4,4,1>F g87<4,4,1>F g115<4,4,1>F { align16 1H compacted };
mad(16) g109<1>F -g10.2<0,1,0>F (abs)g96<4,4,1>F g107<4,4,1>F { align16 1H };
mov(1) g10.3<1>F 0x3fc90da4F /* 1.57073F */ { align1 WE_all 1N };
mul(16) g113<1>F g105<8,8,1>F 0x3f22f984F /* 0.63662F */ { align1 1H };
cmp.ge.f0(16) g98<1>F g102<8,8,1>F 0x0F /* 0F */ { align1 1H compacted };
cmp.z.f0(16) g124<1>D g81<8,8,1>D 0D { align1 1H compacted };
mad(16) g111<1>F g10.3<0,1,0>F (abs)g96<4,4,1>F g109<4,4,1>F { align16 1H };
mov(1) g10.4<1>F 0x3f000000F /* 0.5F */ { align1 WE_all 1N };
mad.sat(16) g122<1>F g10.4<0,1,0>F g10.4<0,1,0>F g90<4,4,1>F { align16 1H };
mad.sat(16) g126<1>F g10.4<0,1,0>F g10.4<0,1,0>F g13<4,4,1>F { align16 1H };
mul(16) g115<1>F g113<8,8,1>F g111<8,8,1>F { align1 1H compacted };
mul(16) g11<1>F g122<8,8,1>F 0x43ff8000F /* 511F */ { align1 1H };
sel.l(16) g117<1>F g115<8,8,1>F 0x3f800000F /* 1F */ { align1 1H };
mul(16) g13<1>F g126<8,8,1>F 0x43ff8000F /* 511F */ { align1 1H };
mov(16) g33<1>UD g11<8,8,1>F { align1 1H compacted };
mul(16) g119<1>F g117<8,8,1>F 0x427c0000F /* 63F */ { align1 1H };
mov(16) g35<1>UD g13<8,8,1>F { align1 1H compacted };
shl(16) g41<1>D g33<8,8,1>D 0x00000011UD { align1 1H };
mov(16) g121<1>UD g119<8,8,1>F { align1 1H compacted };
shl(16) g43<1>D g35<8,8,1>D 0x00000008UD { align1 1H };
shl(16) g67<1>D g121<8,8,1>D 0x0000001aUD { align1 1H };
add(16) g65<1>D g67<8,8,1>D g41<8,8,1>D { align1 1H compacted };
add(16) g59<1>D g65<8,8,1>D g43<8,8,1>D { align1 1H compacted };
add(16) g61<1>D g59<8,8,1>D g85<8,8,1>D { align1 1H compacted };
bfi2(16) g29<1>UD g73<4,4,1>UD g98<4,4,1>UD g61<4,4,1>UD { align16 1H };
(+f0) if(16) JIP: 64 UIP: 64 { align1 1H };
END B9 ->B10 ->B11
START B10 <-B9 (24 cycles)
shl(16) g27<1>D g83<8,8,1>D 0x00000004UD { align1 1H };
mov(1) f1<1>UW f0.1<0,1,0>UW { align1 WE_all 1N };
(+f1) send(16) null<1>UW g27<8,8,1>UD 0x0c025c0d dp data 1 MsgDesc: ( DC untyped surface write, Surface = 13, SIMD16, Mask = 0xc) mlen 6 rlen 0 { align1 1H };
END B10 ->B11
START B11 <-B9 <-B10 (26 cycles)
endif(16) JIP: 192 { align1 1H };
mov.nz.f0(16) null<1>D g124<8,8,1>D { align1 1H };
(+f0) if(16) JIP: 56 UIP: 56 { align1 1H };
END B11 ->B12 ->B13
START B12 <-B11 (24 cycles)
mov(16) g47<1>UD g16.1<0,1,0>UD { align1 1H compacted };
mov(1) f1<1>UW f0.1<0,1,0>UW { align1 WE_all 1N };
(+f1) send(16) null<1>UW g45<8,8,1>UD 0x08025e0c dp data 1 MsgDesc: ( DC untyped surface write, Surface = 12, SIMD16, Mask = 0xe) mlen 4 rlen 0 { align1 1H };
END B12 ->B13
START B13 <-B11 <-B12 (26 cycles)
endif(16) JIP: 104 { align1 1H };
mov.nz.f0(16) null<1>D g124<8,8,1>D { align1 1H };
(+f0) if(16) JIP: 56 UIP: 56 { align1 1H };
END B13 ->B14 ->B15
START B14 <-B13 (24 cycles)
mov(16) g51<1>UD g16.2<0,1,0>UD { align1 1H compacted };
mov(1) f1<1>UW f0.1<0,1,0>UW { align1 WE_all 1N };
(+f1) send(16) null<1>UW g49<8,8,1>UD 0x08025e0c dp data 1 MsgDesc: ( DC untyped surface write, Surface = 12, SIMD16, Mask = 0xe) mlen 4 rlen 0 { align1 1H };
END B14 ->B15
START B15 <-B13 <-B14 (4 cycles)
endif(16) JIP: 16 { align1 1H };
END B15 ->B16
START B16 <-B8 <-B15 (272 cycles)
endif(16) JIP: 16 { align1 1H };
mul(16) g78<1>F g25<8,8,1>F g15<0,1,0>F { align1 1H compacted };
mul(16) g37<1>F g25<8,8,1>F g15.1<0,1,0>F { align1 1H compacted };
mul(16) g39<1>F g25<8,8,1>F g15.2<0,1,0>F { align1 1H compacted };
add(16) g87<1>F -g75<8,8,1>F 0xc1f00000F /* -30F */ { align1 1H };
mov.sat(16) g91<1>F g8<8,8,1>F { align1 1H compacted };
mov(1) g10.5<1>F 0x3f800000F /* 1F */ { align1 WE_all 1N };
(+f0.1) cmp.z.f0.1(16) null<1>D g81<8,8,1>D 0D { align1 1H compacted };
mul(16) g19<1>F g78<8,8,1>F g15.3<0,1,0>F { align1 1H compacted };
mul(16) g21<1>F g37<8,8,1>F g15.3<0,1,0>F { align1 1H compacted };
mul(16) g23<1>F g39<8,8,1>F g15.3<0,1,0>F { align1 1H compacted };
sel.ge(16) g89<1>F g87<8,8,1>F 0x0F /* 0F */ { align1 1H compacted };
mul(16) g93<1>F g89<8,8,1>F 0x3c888889F /* 0.0166667F */ { align1 1H };
mad(16) g95<1>F g10.5<0,1,0>F g93<4,4,1>F -g93<4,4,1>F { align16 1H };
sel.ge(16) g97<1>F g95<8,8,1>F 0x0F /* 0F */ { align1 1H compacted };
mul(16) g10<1>F g97<8,8,1>F g91<8,8,1>F { align1 1H compacted };
mov(16) g17<1>UD g0<8,8,1>UD { align1 WE_all 1H compacted };
mov(1) g18.14<1>UW f0.1<0,1,0>UW { align1 WE_all 1N };
(+f0.1) sendc(16) null<1>UW g17<0,1,0>F 0x140b0000 render MsgDesc: RT write SIMD16 Surface = 0 mlen 10 rlen 0 { align1 1H };
mov(16) g2<1>UD g0<8,8,1>UD { align1 WE_all 1H compacted };
mov(1) g2.2<1>UD 0x00000001UD { align1 WE_all 1N compacted };
mov(1) g3.14<1>UW f0.1<0,1,0>UW { align1 WE_all 1N };
mov(16) g4<1>F -g75<8,8,1>F { align1 1H compacted };
mov(16) g6<1>F 0x477fe000F /* 65504F */ { align1 1H };
mov(16) g8<1>F 0x477fe000F /* 65504F */ { align1 1H };
(+f0.1) sendc(16) null<1>UW g2<0,1,0>F 0x140b0001 render MsgDesc: RT write SIMD16 Surface = 1 mlen 10 rlen 0 { align1 1H };
mov(16) g118<1>UD g0<8,8,1>UD { align1 WE_all 1H compacted };
mov(1) g118.2<1>UD 0x00000002UD { align1 WE_all 1N compacted };
mov(1) g119.14<1>UW f0.1<0,1,0>UW { align1 WE_all 1N };
mov(16) g120<1>F 0x0VF /* [0F, 0F, 0F, 0F]VF */ { align1 1H compacted };
mov(16) g122<1>F 0x0VF /* [0F, 0F, 0F, 0F]VF */ { align1 1H compacted };
mov(16) g124<1>F 0x0VF /* [0F, 0F, 0F, 0F]VF */ { align1 1H compacted };
mov(16) g126<1>F 0x0VF /* [0F, 0F, 0F, 0F]VF */ { align1 1H compacted };
(+f0.1) sendc(16) null<1>UW g118<0,1,0>F 0x940b1002 render MsgDesc: RT write SIMD16 LastRT Surface = 2 mlen 10 rlen 0 { align1 1H EOT };
END B16
shaders/closed/steam/deus-ex-mankind-divided/2354.shader_test - FS SIMD8 shader: 326 inst, 2 loops, 14465 cycles, 0:0 spills:fills, Promoted 6 constants, compacted 5216 to 3696 bytes.
shaders/closed/steam/deus-ex-mankind-divided/2354.shader_test - FS SIMD16 shader: 326 inst, 2 loops, 15698 cycles, 0:0 spills:fills, Promoted 6 constants, compacted 5216 to 3680 bytes.
Thread 0 took 1.11 seconds and compiled 1 shaders (not including SIMD16) with 1 GL context switches