[Mesa-dev] [PATCH v2 50/52] spirv: Rework barriers
Lionel Landwerlin
lionel.g.landwerlin at intel.com
Fri Oct 13 10:56:19 UTC 2017
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin at intel.com>
On 13/10/17 06:48, Jason Ekstrand wrote:
> Our previous handling of barriers always used the big hammer and didn't
> correctly emit memory barriers when they were specified along with a
> control barrier. This commit completely reworks the way we emit barriers
> to make things both more precise and more correct.
> ---
> src/compiler/spirv/spirv_to_nir.c | 132 ++++++++++++++++++++++++++++++++------
> 1 file changed, 114 insertions(+), 18 deletions(-)
>
> diff --git a/src/compiler/spirv/spirv_to_nir.c b/src/compiler/spirv/spirv_to_nir.c
> index 079ff0f..a729ef4 100644
> --- a/src/compiler/spirv/spirv_to_nir.c
> +++ b/src/compiler/spirv/spirv_to_nir.c
> @@ -2571,36 +2571,132 @@ vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
>  }
>
>  static void
> +vtn_emit_barrier(struct vtn_builder *b, nir_intrinsic_op op)
> +{
> +   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
> +   nir_builder_instr_insert(&b->nb, &intrin->instr);
> +}
> +
> +static void
> +vtn_emit_memory_barrier(struct vtn_builder *b, SpvScope scope,
> +                        SpvMemorySemanticsMask semantics)
> +{
> +   static const SpvMemorySemanticsMask all_memory_semantics =
> +      SpvMemorySemanticsUniformMemoryMask |
> +      SpvMemorySemanticsWorkgroupMemoryMask |
> +      SpvMemorySemanticsAtomicCounterMemoryMask |
> +      SpvMemorySemanticsImageMemoryMask;
> +
> +   /* If we're not actually doing a memory barrier, bail */
> +   if (!(semantics & all_memory_semantics))
> +      return;
> +
> +   /* GL and Vulkan don't have these */
> +   assert(scope != SpvScopeCrossDevice);
> +
> +   if (scope == SpvScopeSubgroup)
> +      return; /* Nothing to do here */
> +
> +   if (scope == SpvScopeWorkgroup) {
> +      vtn_emit_barrier(b, nir_intrinsic_group_memory_barrier);
> +      return;
> +   }
> +
> +   /* There are only two scopes left */
> +   assert(scope == SpvScopeInvocation || scope == SpvScopeDevice);
> +
> +   if ((semantics & all_memory_semantics) == all_memory_semantics) {
> +      vtn_emit_barrier(b, nir_intrinsic_memory_barrier);
> +      return;
> +   }
> +
> +   /* Issue a bunch of more specific barriers */
> +   uint32_t bits = semantics;
> +   while (bits) {
> +      SpvMemorySemanticsMask semantic = 1 << u_bit_scan(&bits);
> +      switch (semantic) {
> +      case SpvMemorySemanticsUniformMemoryMask:
> +         vtn_emit_barrier(b, nir_intrinsic_memory_barrier_buffer);
> +         break;
> +      case SpvMemorySemanticsWorkgroupMemoryMask:
> +         vtn_emit_barrier(b, nir_intrinsic_memory_barrier_shared);
> +         break;
> +      case SpvMemorySemanticsAtomicCounterMemoryMask:
> +         vtn_emit_barrier(b, nir_intrinsic_memory_barrier_atomic_counter);
> +         break;
> +      case SpvMemorySemanticsImageMemoryMask:
> +         vtn_emit_barrier(b, nir_intrinsic_memory_barrier_image);
> +         break;
> +      default:
> +         break;
> +      }
> +   }
> +}
> +
> +static void
>  vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode,
>                     const uint32_t *w, unsigned count)
>  {
> -   nir_intrinsic_op intrinsic_op;
>     switch (opcode) {
>     case SpvOpEmitVertex:
>     case SpvOpEmitStreamVertex:
> -      intrinsic_op = nir_intrinsic_emit_vertex;
> -      break;
>     case SpvOpEndPrimitive:
> -   case SpvOpEndStreamPrimitive:
> -      intrinsic_op = nir_intrinsic_end_primitive;
> -      break;
> -   case SpvOpMemoryBarrier:
> -      intrinsic_op = nir_intrinsic_memory_barrier;
> -      break;
> -   case SpvOpControlBarrier:
> -      intrinsic_op = nir_intrinsic_barrier;
> +   case SpvOpEndStreamPrimitive: {
> +      nir_intrinsic_op intrinsic_op;
> +      switch (opcode) {
> +      case SpvOpEmitVertex:
> +      case SpvOpEmitStreamVertex:
> +         intrinsic_op = nir_intrinsic_emit_vertex;
> +         break;
> +      case SpvOpEndPrimitive:
> +      case SpvOpEndStreamPrimitive:
> +         intrinsic_op = nir_intrinsic_end_primitive;
> +         break;
> +      default:
> +         unreachable("Invalid opcode");
> +      }
> +
> +      nir_intrinsic_instr *intrin =
> +         nir_intrinsic_instr_create(b->shader, intrinsic_op);
> +
> +      switch (opcode) {
> +      case SpvOpEmitStreamVertex:
> +      case SpvOpEndStreamPrimitive:
> +         nir_intrinsic_set_stream_id(intrin, w[1]);
> +         break;
> +      default:
> +         break;
> +      }
> +
> +      nir_builder_instr_insert(&b->nb, &intrin->instr);
>        break;
> -   default:
> -      unreachable("unknown barrier instruction");
>     }
>
> -   nir_intrinsic_instr *intrin =
> -      nir_intrinsic_instr_create(b->shader, intrinsic_op);
> +   case SpvOpMemoryBarrier: {
> +      SpvScope scope = vtn_constant_value(b, w[1])->values[0].u32[0];
> +      SpvMemorySemanticsMask semantics =
> +         vtn_constant_value(b, w[2])->values[0].u32[0];
> +      vtn_emit_memory_barrier(b, scope, semantics);
> +      return;
> +   }
> +
> +   case SpvOpControlBarrier: {
> +      SpvScope execution_scope =
> +         vtn_constant_value(b, w[1])->values[0].u32[0];
> +      if (execution_scope == SpvScopeWorkgroup)
> +         vtn_emit_barrier(b, nir_intrinsic_barrier);
>
> -   if (opcode == SpvOpEmitStreamVertex || opcode == SpvOpEndStreamPrimitive)
> -      nir_intrinsic_set_stream_id(intrin, w[1]);
> +      SpvScope memory_scope =
> +         vtn_constant_value(b, w[2])->values[0].u32[0];
> +      SpvMemorySemanticsMask memory_semantics =
> +         vtn_constant_value(b, w[3])->values[0].u32[0];
> +      vtn_emit_memory_barrier(b, memory_scope, memory_semantics);
> +      break;
> +   }
>
> -   nir_builder_instr_insert(&b->nb, &intrin->instr);
> +   default:
> +      unreachable("unknown barrier instruction");
> +   }
>  }
>
>  static unsigned
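
For anyone following along, here is a quick standalone sketch of the
decomposition this patch does (my own illustration, not Mesa code; scope
handling is omitted, the SEM_* values are assumed to mirror the
SpvMemorySemantics* bits in spirv.h, and the printed names stand in for the
NIR intrinsics the patch emits): the semantics bitfield is scanned and each
memory class maps to a specific barrier instead of always using the big
hammer, and a control barrier additionally emits whatever memory barrier
its own scope/semantics operands ask for.

/* Standalone illustration only -- not Mesa code. */
#include <stdint.h>
#include <stdio.h>

#define SEM_UNIFORM_MEMORY        0x0040u
#define SEM_WORKGROUP_MEMORY      0x0100u
#define SEM_ATOMIC_COUNTER_MEMORY 0x0400u
#define SEM_IMAGE_MEMORY          0x0800u
#define SEM_ALL_MEMORY (SEM_UNIFORM_MEMORY | SEM_WORKGROUP_MEMORY | \
                        SEM_ATOMIC_COUNTER_MEMORY | SEM_IMAGE_MEMORY)

static void
emit_memory_barriers(uint32_t semantics)
{
   if (!(semantics & SEM_ALL_MEMORY))
      return; /* no memory semantics requested, nothing to emit */

   if ((semantics & SEM_ALL_MEMORY) == SEM_ALL_MEMORY) {
      puts("nir_intrinsic_memory_barrier"); /* everything -> big hammer */
      return;
   }

   /* Otherwise scan the bitfield and emit only the specific barriers. */
   uint32_t bits = semantics & SEM_ALL_MEMORY;
   while (bits) {
      uint32_t bit = bits & -bits; /* lowest set bit, like u_bit_scan() */
      bits &= bits - 1;
      switch (bit) {
      case SEM_UNIFORM_MEMORY:
         puts("nir_intrinsic_memory_barrier_buffer");
         break;
      case SEM_WORKGROUP_MEMORY:
         puts("nir_intrinsic_memory_barrier_shared");
         break;
      case SEM_ATOMIC_COUNTER_MEMORY:
         puts("nir_intrinsic_memory_barrier_atomic_counter");
         break;
      case SEM_IMAGE_MEMORY:
         puts("nir_intrinsic_memory_barrier_image");
         break;
      }
   }
}

int
main(void)
{
   /* OpMemoryBarrier with Uniform|Image semantics: two specific barriers
    * instead of one full memory barrier. */
   emit_memory_barriers(SEM_UNIFORM_MEMORY | SEM_IMAGE_MEMORY);

   /* OpControlBarrier: the execution barrier plus whatever memory
    * semantics were requested alongside it. */
   puts("nir_intrinsic_barrier");
   emit_memory_barriers(SEM_WORKGROUP_MEMORY);
   return 0;
}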