Mesa (bug-109980): ir3/compiler: Enable lower_io_offsets pass and handle new SSBO intrinsics

GitLab Mirror gitlab-mirror at kemper.freedesktop.org
Thu Mar 14 13:04:17 UTC 2019


Module: Mesa
Branch: bug-109980
Commit: 2e4525883f0744b0c8df9792ded597090a8ad987
URL:    http://cgit.freedesktop.org/mesa/mesa/commit/?id=2e4525883f0744b0c8df9792ded597090a8ad987

Author: Eduardo Lima Mitev <elima at igalia.com>
Date:   Tue Feb 26 14:07:04 2019 +0100

ir3/compiler: Enable lower_io_offsets pass and handle new SSBO intrinsics

These intrinsics already carry the offset in dwords, pre-computed, as their
last source, so the change here is basically to use that instead of emitting
an ir3_SHR to divide the byte offset by 4.
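
As a rough illustration of what the NIR-side pass does (the actual
ir3_nir_lower_io_offsets implementation is not part of this diff, so the
listing below is a conceptual sketch rather than real pass output), a load
such as:

  vec4 32 ssa_5 = intrinsic load_ssbo (ssa_0, ssa_4) ()

is rewritten to an ir3-specific variant that keeps the byte offset and
appends the dword offset as the last source, roughly:

  vec1 32 ssa_6 = ushr ssa_4, 2   ; byte offset / 4, now visible to NIR
  vec4 32 ssa_7 = intrinsic load_ssbo_ir3 (ssa_0, ssa_4, ssa_6) ()

Since the shift is now ordinary NIR, constant folding and CSE can clean it
up before the backend ever sees it.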

The improvement in shader stats is significant: up to ~15% reduction in
instruction count in some cases. Tested only on a5xx.

shader-db is unfortunately not very useful here because shaders that use
SSBO require GLSL versions that are not supported by freedreno yet.

For example, most Khronos CTS tests under 'dEQP-GLES31.functional.ssbo.*'
are helped.

A random case:

dEQP-GLES31.functional.ssbo.layout.2_level_array.packed.row_major_mat3x2

with current master:

; CL prog 14/1: 1252 instructions, 0 half, 48 full
; 8 const, 8 constlen
; 61 (ss), 43 (sy)

with the SSBO dword-offset moved to NIR:

; CL prog 14/1: 1053 instructions, 0 half, 45 full
; 7 const, 7 constlen
; 34 (ss), 73 (sy)

The SHR previously emitted for every single SSBO instruction disappears in
most cases, and the dword offset often ends up embedded in the STGB
instruction as an immediate as well.

A few of those tests that currently fail in register allocation also start
to pass as a result of the reduced register pressure. At least these,
probably more:

dEQP-GLES31.functional.ssbo.layout.random.unsized_arrays.24
dEQP-GLES31.functional.ssbo.layout.random.arrays_of_arrays.6
dEQP-GLES31.functional.ssbo.layout.random.arrays_of_arrays.17
dEQP-GLES31.functional.ssbo.layout.random.nested_structs_arrays.14
dEQP-GLES31.functional.ssbo.layout.random.nested_structs_arrays_instance_arrays.5
dEQP-GLES31.functional.ssbo.layout.random.nested_structs_arrays_instance_arrays.7

No regressions observed with relevant CTS and piglit tests.

Reviewed-by: Rob Clark <robdclark at gmail.com>

---

 src/freedreno/ir3/ir3_a4xx.c         | 49 ++++++++++++++++++++----------------
 src/freedreno/ir3/ir3_a6xx.c         | 41 +++++++++++-------------------
 src/freedreno/ir3/ir3_compiler_nir.c | 28 ++++++++++++---------
 src/freedreno/ir3/ir3_nir.c          |  1 +
 4 files changed, 59 insertions(+), 60 deletions(-)

diff --git a/src/freedreno/ir3/ir3_a4xx.c b/src/freedreno/ir3/ir3_a4xx.c
index 1f86cd5533c..609ecf00c1e 100644
--- a/src/freedreno/ir3/ir3_a4xx.c
+++ b/src/freedreno/ir3/ir3_a4xx.c
@@ -40,7 +40,7 @@ emit_intrinsic_load_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr,
 		struct ir3_instruction **dst)
 {
 	struct ir3_block *b = ctx->block;
-	struct ir3_instruction *ldgb, *src0, *src1, *offset;
+	struct ir3_instruction *ldgb, *src0, *src1, *byte_offset, *offset;
 	nir_const_value *const_offset;
 
 	/* can this be non-const buffer_index?  how do we handle that? */
@@ -49,14 +49,15 @@ emit_intrinsic_load_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr,
 
 	int ibo_idx = ir3_ssbo_to_ibo(&ctx->so->image_mapping, const_offset->u32[0]);
 
-	offset = ir3_get_src(ctx, &intr->src[1])[0];
+	byte_offset = ir3_get_src(ctx, &intr->src[1])[0];
+	offset = ir3_get_src(ctx, &intr->src[2])[0];
 
 	/* src0 is uvec2(offset*4, 0), src1 is offset.. nir already *= 4: */
 	src0 = ir3_create_collect(ctx, (struct ir3_instruction*[]){
-		offset,
+		byte_offset,
 		create_immed(b, 0),
 	}, 2);
-	src1 = ir3_SHR_B(b, offset, 0, create_immed(b, 2), 0);
+	src1 = offset;
 
 	ldgb = ir3_LDGB(b, create_immed(b, ibo_idx), 0,
 			src0, 0, src1, 0);
@@ -75,7 +76,7 @@ static void
 emit_intrinsic_store_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
 {
 	struct ir3_block *b = ctx->block;
-	struct ir3_instruction *stgb, *src0, *src1, *src2, *offset;
+	struct ir3_instruction *stgb, *src0, *src1, *src2, *byte_offset, *offset;
 	nir_const_value *const_offset;
 	/* TODO handle wrmask properly, see _store_shared().. but I think
 	 * it is more a PITA than that, since blob ends up loading the
@@ -90,15 +91,16 @@ emit_intrinsic_store_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
 
 	int ibo_idx = ir3_ssbo_to_ibo(&ctx->so->image_mapping,  const_offset->u32[0]);
 
-	offset = ir3_get_src(ctx, &intr->src[2])[0];
+	byte_offset = ir3_get_src(ctx, &intr->src[2])[0];
+	offset = ir3_get_src(ctx, &intr->src[3])[0];
 
 	/* src0 is value, src1 is offset, src2 is uvec2(offset*4, 0)..
 	 * nir already *= 4:
 	 */
 	src0 = ir3_create_collect(ctx, ir3_get_src(ctx, &intr->src[0]), ncomp);
-	src1 = ir3_SHR_B(b, offset, 0, create_immed(b, 2), 0);
+	src1 = offset;
 	src2 = ir3_create_collect(ctx, (struct ir3_instruction*[]){
-		offset,
+		byte_offset,
 		create_immed(b, 0),
 	}, 2);
 
@@ -133,7 +135,8 @@ static struct ir3_instruction *
 emit_intrinsic_atomic_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
 {
 	struct ir3_block *b = ctx->block;
-	struct ir3_instruction *atomic, *ssbo, *src0, *src1, *src2, *offset;
+	struct ir3_instruction *atomic, *ssbo, *src0, *src1, *src2, *byte_offset,
+		*offset;
 	nir_const_value *const_offset;
 	type_t type = TYPE_U32;
 
@@ -144,7 +147,8 @@ emit_intrinsic_atomic_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
 	int ibo_idx = ir3_ssbo_to_ibo(&ctx->so->image_mapping,  const_offset->u32[0]);
 	ssbo = create_immed(b, ibo_idx);
 
-	offset = ir3_get_src(ctx, &intr->src[1])[0];
+	byte_offset = ir3_get_src(ctx, &intr->src[1])[0];
+	offset = ir3_get_src(ctx, &intr->src[3])[0];
 
 	/* src0 is data (or uvec2(data, compare))
 	 * src1 is offset
@@ -153,48 +157,49 @@ emit_intrinsic_atomic_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
 	 * Note that nir already multiplies the offset by four
 	 */
 	src0 = ir3_get_src(ctx, &intr->src[2])[0];
-	src1 = ir3_SHR_B(b, offset, 0, create_immed(b, 2), 0);
+	src1 = offset;
 	src2 = ir3_create_collect(ctx, (struct ir3_instruction*[]){
-		offset,
+		byte_offset,
 		create_immed(b, 0),
 	}, 2);
 
 	switch (intr->intrinsic) {
-	case nir_intrinsic_ssbo_atomic_add:
+	case nir_intrinsic_ssbo_atomic_add_ir3:
 		atomic = ir3_ATOMIC_ADD_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
 		break;
-	case nir_intrinsic_ssbo_atomic_imin:
+	case nir_intrinsic_ssbo_atomic_imin_ir3:
 		atomic = ir3_ATOMIC_MIN_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
 		type = TYPE_S32;
 		break;
-	case nir_intrinsic_ssbo_atomic_umin:
+	case nir_intrinsic_ssbo_atomic_umin_ir3:
 		atomic = ir3_ATOMIC_MIN_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
 		break;
-	case nir_intrinsic_ssbo_atomic_imax:
+	case nir_intrinsic_ssbo_atomic_imax_ir3:
 		atomic = ir3_ATOMIC_MAX_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
 		type = TYPE_S32;
 		break;
-	case nir_intrinsic_ssbo_atomic_umax:
+	case nir_intrinsic_ssbo_atomic_umax_ir3:
 		atomic = ir3_ATOMIC_MAX_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
 		break;
-	case nir_intrinsic_ssbo_atomic_and:
+	case nir_intrinsic_ssbo_atomic_and_ir3:
 		atomic = ir3_ATOMIC_AND_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
 		break;
-	case nir_intrinsic_ssbo_atomic_or:
+	case nir_intrinsic_ssbo_atomic_or_ir3:
 		atomic = ir3_ATOMIC_OR_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
 		break;
-	case nir_intrinsic_ssbo_atomic_xor:
+	case nir_intrinsic_ssbo_atomic_xor_ir3:
 		atomic = ir3_ATOMIC_XOR_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
 		break;
-	case nir_intrinsic_ssbo_atomic_exchange:
+	case nir_intrinsic_ssbo_atomic_exchange_ir3:
 		atomic = ir3_ATOMIC_XCHG_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
 		break;
-	case nir_intrinsic_ssbo_atomic_comp_swap:
+	case nir_intrinsic_ssbo_atomic_comp_swap_ir3:
 		/* for cmpxchg, src0 is [ui]vec2(data, compare): */
 		src0 = ir3_create_collect(ctx, (struct ir3_instruction*[]){
 			ir3_get_src(ctx, &intr->src[3])[0],
 			src0,
 		}, 2);
+		src1 = ir3_get_src(ctx, &intr->src[4])[0];
 		atomic = ir3_ATOMIC_CMPXCHG_G(b, ssbo, 0, src0, 0, src1, 0, src2, 0);
 		break;
 	default:
diff --git a/src/freedreno/ir3/ir3_a6xx.c b/src/freedreno/ir3/ir3_a6xx.c
index 00260a4c534..048b84c3370 100644
--- a/src/freedreno/ir3/ir3_a6xx.c
+++ b/src/freedreno/ir3/ir3_a6xx.c
@@ -38,17 +38,6 @@
  */
 
 
-static struct ir3_instruction *
-ssbo_offset(struct ir3_block *b, struct ir3_instruction *byte_offset)
-{
-	/* TODO hardware wants offset in terms of elements, not bytes.  Which
-	 * is kinda nice but opposite of what nir does.  It would be nice if
-	 * we had a way to request the units of the offset to avoid the extra
-	 * shift instructions..
-	 */
-	return ir3_SHR_B(b, byte_offset, 0, create_immed(b, 2), 0);
-}
-
 /* src[] = { buffer_index, offset }. No const_index */
 static void
 emit_intrinsic_load_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr,
@@ -65,7 +54,7 @@ emit_intrinsic_load_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr,
 
 	int ibo_idx = ir3_ssbo_to_ibo(&ctx->so->image_mapping, buffer_index->u32[0]);
 
-	offset = ssbo_offset(b, ir3_get_src(ctx, &intr->src[1])[0]);
+	offset = ir3_get_src(ctx, &intr->src[2])[0];
 
 	ldib = ir3_LDIB(b, create_immed(b, ibo_idx), 0, offset, 0);
 	ldib->regs[0]->wrmask = MASK(intr->num_components);
@@ -101,7 +90,7 @@ emit_intrinsic_store_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
 	/* src0 is offset, src1 is value:
 	 */
 	val = ir3_create_collect(ctx, ir3_get_src(ctx, &intr->src[0]), ncomp);
-	offset = ssbo_offset(b, ir3_get_src(ctx, &intr->src[2])[0]);
+	offset = ir3_get_src(ctx, &intr->src[3])[0];
 
 	stib = ir3_STIB(b, create_immed(b, ibo_idx), 0, offset, 0, val, 0);
 	stib->cat6.iim_val = ncomp;
@@ -134,7 +123,7 @@ static struct ir3_instruction *
 emit_intrinsic_atomic_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
 {
 	struct ir3_block *b = ctx->block;
-	struct ir3_instruction *atomic, *ibo, *src0, *src1, *offset, *data, *dummy;
+	struct ir3_instruction *atomic, *ibo, *src0, *src1, *data, *dummy;
 	nir_const_value *buffer_index;
 	type_t type = TYPE_U32;
 
@@ -145,7 +134,6 @@ emit_intrinsic_atomic_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
 	int ibo_idx = ir3_ssbo_to_ibo(&ctx->so->image_mapping,  buffer_index->u32[0]);
 	ibo = create_immed(b, ibo_idx);
 
-	offset = ir3_get_src(ctx, &intr->src[1])[0];
 	data   = ir3_get_src(ctx, &intr->src[2])[0];
 
 	/* So this gets a bit creative:
@@ -163,50 +151,51 @@ emit_intrinsic_atomic_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
 	 * Note that nir already multiplies the offset by four
 	 */
 	dummy = create_immed(b, 0);
-	src0 = ssbo_offset(b, offset);
 
 	if (intr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap) {
+		src0 = ir3_get_src(ctx, &intr->src[4])[0];
 		struct ir3_instruction *compare = ir3_get_src(ctx, &intr->src[3])[0];
 		src1 = ir3_create_collect(ctx, (struct ir3_instruction*[]){
 			dummy, compare, data
 		}, 3);
 	} else {
+		src0 = ir3_get_src(ctx, &intr->src[3])[0];
 		src1 = ir3_create_collect(ctx, (struct ir3_instruction*[]){
 			dummy, data
 		}, 2);
 	}
 
 	switch (intr->intrinsic) {
-	case nir_intrinsic_ssbo_atomic_add:
+	case nir_intrinsic_ssbo_atomic_add_ir3:
 		atomic = ir3_ATOMIC_ADD_G(b, ibo, 0, src0, 0, src1, 0);
 		break;
-	case nir_intrinsic_ssbo_atomic_imin:
+	case nir_intrinsic_ssbo_atomic_imin_ir3:
 		atomic = ir3_ATOMIC_MIN_G(b, ibo, 0, src0, 0, src1, 0);
 		type = TYPE_S32;
 		break;
-	case nir_intrinsic_ssbo_atomic_umin:
+	case nir_intrinsic_ssbo_atomic_umin_ir3:
 		atomic = ir3_ATOMIC_MIN_G(b, ibo, 0, src0, 0, src1, 0);
 		break;
-	case nir_intrinsic_ssbo_atomic_imax:
+	case nir_intrinsic_ssbo_atomic_imax_ir3:
 		atomic = ir3_ATOMIC_MAX_G(b, ibo, 0, src0, 0, src1, 0);
 		type = TYPE_S32;
 		break;
-	case nir_intrinsic_ssbo_atomic_umax:
+	case nir_intrinsic_ssbo_atomic_umax_ir3:
 		atomic = ir3_ATOMIC_MAX_G(b, ibo, 0, src0, 0, src1, 0);
 		break;
-	case nir_intrinsic_ssbo_atomic_and:
+	case nir_intrinsic_ssbo_atomic_and_ir3:
 		atomic = ir3_ATOMIC_AND_G(b, ibo, 0, src0, 0, src1, 0);
 		break;
-	case nir_intrinsic_ssbo_atomic_or:
+	case nir_intrinsic_ssbo_atomic_or_ir3:
 		atomic = ir3_ATOMIC_OR_G(b, ibo, 0, src0, 0, src1, 0);
 		break;
-	case nir_intrinsic_ssbo_atomic_xor:
+	case nir_intrinsic_ssbo_atomic_xor_ir3:
 		atomic = ir3_ATOMIC_XOR_G(b, ibo, 0, src0, 0, src1, 0);
 		break;
-	case nir_intrinsic_ssbo_atomic_exchange:
+	case nir_intrinsic_ssbo_atomic_exchange_ir3:
 		atomic = ir3_ATOMIC_XCHG_G(b, ibo, 0, src0, 0, src1, 0);
 		break;
-	case nir_intrinsic_ssbo_atomic_comp_swap:
+	case nir_intrinsic_ssbo_atomic_comp_swap_ir3:
 		atomic = ir3_ATOMIC_CMPXCHG_G(b, ibo, 0, src0, 0, src1, 0);
 		break;
 	default:
diff --git a/src/freedreno/ir3/ir3_compiler_nir.c b/src/freedreno/ir3/ir3_compiler_nir.c
index 1e3fbeb3117..72549fef70d 100644
--- a/src/freedreno/ir3/ir3_compiler_nir.c
+++ b/src/freedreno/ir3/ir3_compiler_nir.c
@@ -1156,25 +1156,29 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
 			}
 		}
 		break;
-	case nir_intrinsic_load_ssbo:
+		/* All SSBO intrinsics should have been lowered by 'lower_io_offsets'
+		 * pass and replaced by an ir3-specifc version that adds the
+		 * dword-offset in the last source.
+		 */
+	case nir_intrinsic_load_ssbo_ir3:
 		ctx->funcs->emit_intrinsic_load_ssbo(ctx, intr, dst);
 		break;
-	case nir_intrinsic_store_ssbo:
+	case nir_intrinsic_store_ssbo_ir3:
 		ctx->funcs->emit_intrinsic_store_ssbo(ctx, intr);
 		break;
 	case nir_intrinsic_get_buffer_size:
 		emit_intrinsic_ssbo_size(ctx, intr, dst);
 		break;
-	case nir_intrinsic_ssbo_atomic_add:
-	case nir_intrinsic_ssbo_atomic_imin:
-	case nir_intrinsic_ssbo_atomic_umin:
-	case nir_intrinsic_ssbo_atomic_imax:
-	case nir_intrinsic_ssbo_atomic_umax:
-	case nir_intrinsic_ssbo_atomic_and:
-	case nir_intrinsic_ssbo_atomic_or:
-	case nir_intrinsic_ssbo_atomic_xor:
-	case nir_intrinsic_ssbo_atomic_exchange:
-	case nir_intrinsic_ssbo_atomic_comp_swap:
+	case nir_intrinsic_ssbo_atomic_add_ir3:
+	case nir_intrinsic_ssbo_atomic_imin_ir3:
+	case nir_intrinsic_ssbo_atomic_umin_ir3:
+	case nir_intrinsic_ssbo_atomic_imax_ir3:
+	case nir_intrinsic_ssbo_atomic_umax_ir3:
+	case nir_intrinsic_ssbo_atomic_and_ir3:
+	case nir_intrinsic_ssbo_atomic_or_ir3:
+	case nir_intrinsic_ssbo_atomic_xor_ir3:
+	case nir_intrinsic_ssbo_atomic_exchange_ir3:
+	case nir_intrinsic_ssbo_atomic_comp_swap_ir3:
 		dst[0] = ctx->funcs->emit_intrinsic_atomic_ssbo(ctx, intr);
 		break;
 	case nir_intrinsic_load_shared:
diff --git a/src/freedreno/ir3/ir3_nir.c b/src/freedreno/ir3/ir3_nir.c
index 57595e00306..138f8f1af66 100644
--- a/src/freedreno/ir3/ir3_nir.c
+++ b/src/freedreno/ir3/ir3_nir.c
@@ -188,6 +188,7 @@ ir3_optimize_nir(struct ir3_shader *shader, nir_shader *s,
 
 	OPT_V(s, nir_opt_global_to_local);
 	OPT_V(s, nir_lower_regs_to_ssa);
+	OPT_V(s, ir3_nir_lower_io_offsets);
 
 	if (key) {
 		if (s->info.stage == MESA_SHADER_VERTEX) {



