[Mesa-dev] [PATCH 1/2] spirv/nir: handle memory access qualifiers for SSBO loads/stores
Samuel Pitoiset
samuel.pitoiset at gmail.com
Wed Oct 3 21:08:42 UTC 2018
Signed-off-by: Samuel Pitoiset <samuel.pitoiset at gmail.com>
---
src/compiler/nir/nir_intrinsics.py | 4 ++--
src/compiler/spirv/spirv_to_nir.c | 29 ++++++++++++++++++++++++++---
src/compiler/spirv/vtn_private.h | 3 +++
src/compiler/spirv/vtn_variables.c | 16 ++++++++++++----
4 files changed, 43 insertions(+), 9 deletions(-)
diff --git a/src/compiler/nir/nir_intrinsics.py b/src/compiler/nir/nir_intrinsics.py
index b06b38fc2c..ec3049ca06 100644
--- a/src/compiler/nir/nir_intrinsics.py
+++ b/src/compiler/nir/nir_intrinsics.py
@@ -565,7 +565,7 @@ intrinsic("load_interpolated_input", src_comp=[2, 1], dest_comp=0,
indices=[BASE, COMPONENT], flags=[CAN_ELIMINATE, CAN_REORDER])
# src[] = { buffer_index, offset }. No const_index
-load("ssbo", 2, flags=[CAN_ELIMINATE])
+load("ssbo", 2, flags=[CAN_ELIMINATE], indices=[ACCESS])
# src[] = { offset }. const_index[] = { base, component }
load("output", 1, [BASE, COMPONENT], flags=[CAN_ELIMINATE])
# src[] = { vertex, offset }. const_index[] = { base }
@@ -591,6 +591,6 @@ store("output", 2, [BASE, WRMASK, COMPONENT])
# const_index[] = { base, write_mask, component }
store("per_vertex_output", 3, [BASE, WRMASK, COMPONENT])
# src[] = { value, block_index, offset }. const_index[] = { write_mask }
-store("ssbo", 3, [WRMASK])
+store("ssbo", 3, [WRMASK, ACCESS])
# src[] = { value, offset }. const_index[] = { base, write_mask }
store("shared", 2, [BASE, WRMASK])
diff --git a/src/compiler/spirv/spirv_to_nir.c b/src/compiler/spirv/spirv_to_nir.c
index 2ad83196e4..26b8247c42 100644
--- a/src/compiler/spirv/spirv_to_nir.c
+++ b/src/compiler/spirv/spirv_to_nir.c
@@ -668,6 +668,21 @@ mutable_matrix_member(struct vtn_builder *b, struct vtn_type *type, int member)
return type;
}
+static void
+vtn_handle_access_qualifer(struct vtn_builder *b, struct vtn_type *type,
+ int member, enum gl_access_qualifier access)
+{
+ type->members[member] = vtn_type_copy(b, type->members[member]);
+ type = type->members[member];
+
+ while (glsl_type_is_array(type->type)) {
+ type->array_element = vtn_type_copy(b, type->array_element);
+ type = type->array_element;
+ }
+
+ type->access |= access;
+}
+
static void
struct_member_decoration_cb(struct vtn_builder *b,
struct vtn_value *val, int member,
@@ -681,13 +696,21 @@ struct_member_decoration_cb(struct vtn_builder *b,
assert(member < ctx->num_fields);
switch (dec->decoration) {
+ case SpvDecorationRelaxedPrecision:
+ case SpvDecorationUniform:
+ break; /* FIXME: Do nothing with this for now. */
case SpvDecorationNonWritable:
+ vtn_handle_access_qualifer(b, ctx->type, member, ACCESS_NON_WRITEABLE);
+ break;
case SpvDecorationNonReadable:
- case SpvDecorationRelaxedPrecision:
+ vtn_handle_access_qualifer(b, ctx->type, member, ACCESS_NON_READABLE);
+ break;
case SpvDecorationVolatile:
+ vtn_handle_access_qualifer(b, ctx->type, member, ACCESS_VOLATILE);
+ break;
case SpvDecorationCoherent:
- case SpvDecorationUniform:
- break; /* FIXME: Do nothing with this for now. */
+ vtn_handle_access_qualifer(b, ctx->type, member, ACCESS_COHERENT);
+ break;
case SpvDecorationNoPerspective:
ctx->fields[member].interpolation = INTERP_MODE_NOPERSPECTIVE;
break;
diff --git a/src/compiler/spirv/vtn_private.h b/src/compiler/spirv/vtn_private.h
index a31202d129..c930e1cba4 100644
--- a/src/compiler/spirv/vtn_private.h
+++ b/src/compiler/spirv/vtn_private.h
@@ -312,6 +312,9 @@ struct vtn_type {
/* Which built-in to use */
SpvBuiltIn builtin;
+
+ /* Buffer access qualifiers */
+ enum gl_access_qualifier access;
};
/* Members for struct types */
diff --git a/src/compiler/spirv/vtn_variables.c b/src/compiler/spirv/vtn_variables.c
index 358ff4bef7..69fb6dd862 100644
--- a/src/compiler/spirv/vtn_variables.c
+++ b/src/compiler/spirv/vtn_variables.c
@@ -608,7 +608,8 @@ static void
_vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load,
nir_ssa_def *index, nir_ssa_def *offset,
unsigned access_offset, unsigned access_size,
- struct vtn_ssa_value **inout, const struct glsl_type *type)
+ struct vtn_ssa_value **inout, const struct glsl_type *type,
+ enum gl_access_qualifier access)
{
nir_intrinsic_instr *instr = nir_intrinsic_instr_create(b->nb.shader, op);
instr->num_components = glsl_get_vector_elements(type);
@@ -624,6 +625,11 @@ _vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load,
nir_intrinsic_set_range(instr, access_size);
}
+ if (op == nir_intrinsic_load_ssbo ||
+ op == nir_intrinsic_store_ssbo) {
+ nir_intrinsic_set_access(instr, access);
+ }
+
if (index)
instr->src[src++] = nir_src_for_ssa(index);
@@ -704,7 +710,8 @@ _vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
_vtn_load_store_tail(b, op, load, index, elem_offset,
access_offset, access_size,
&(*inout)->elems[i],
- glsl_vector_type(base_type, vec_width));
+ glsl_vector_type(base_type, vec_width),
+ type->access);
}
if (load && type->row_major)
@@ -717,7 +724,7 @@ _vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
vtn_assert(glsl_type_is_vector_or_scalar(type->type));
_vtn_load_store_tail(b, op, load, index, offset,
access_offset, access_size,
- inout, type->type);
+ inout, type->type, type->access);
} else {
/* This is a strided load. We have to load N things separately.
* This is the single column of a row-major matrix case.
@@ -738,7 +745,8 @@ _vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
comp = &temp_val;
_vtn_load_store_tail(b, op, load, index, elem_offset,
access_offset, access_size,
- &comp, glsl_scalar_type(base_type));
+ &comp, glsl_scalar_type(base_type),
+ type->access);
per_comp[i] = comp->def;
}
--
2.19.0
More information about the mesa-dev mailing list