[Mesa-dev] [PATCH 3/3] spirv: Allow OpPtrAccessChain for block indices
Jason Ekstrand
jason at jlekstrand.net
Fri Dec 1 03:06:57 UTC 2017
The SPIR-V spec is a bit underspecified when it comes to exactly how
you're allowed to use OpPtrAccessChain and what it means in certain edge
cases. In particular, what if the base pointer of the OpPtrAccessChain
points to the base struct of an SSBO instead of an element in that SSBO.
The original variable pointers implementation in mesa assumed that you
weren't allowed to do an OpPtrAccessChain that adjusted the block index
and asserted such. However, there are some CTS tests that do this and,
if the CTS does it, someone will do it in the wild so we should probably
handle it. With this commit, we significantly reduce our assumptions
and should be able to handle more-or-less anything.
The one assumption we still make for correctness is that, if we see an
OpPtrAccessChain on a pointer to a struct-decorated block, the block
index should be adjusted. In theory, someone could try to put an array
stride on such a pointer and try to make the SSBO an implicit array of
the base struct and we would not give them what they want. That said,
any index other than 0 would count as an out-of-bounds access which is
invalid.
---
src/compiler/spirv/vtn_variables.c | 128 ++++++++++++++++++++++++-------------
1 file changed, 83 insertions(+), 45 deletions(-)
diff --git a/src/compiler/spirv/vtn_variables.c b/src/compiler/spirv/vtn_variables.c
index 09b3d35..89ce939 100644
--- a/src/compiler/spirv/vtn_variables.c
+++ b/src/compiler/spirv/vtn_variables.c
@@ -157,6 +157,22 @@ vtn_variable_resource_index(struct vtn_builder *b, struct vtn_variable *var,
return &instr->dest.ssa;
}
+static nir_ssa_def *
+vtn_resource_reindex(struct vtn_builder *b, nir_ssa_def *base_index,
+ nir_ssa_def *offset_index)
+{
+ nir_intrinsic_instr *instr =
+ nir_intrinsic_instr_create(b->nb.shader,
+ nir_intrinsic_vulkan_resource_reindex);
+ instr->src[0] = nir_src_for_ssa(base_index);
+ instr->src[1] = nir_src_for_ssa(offset_index);
+
+ nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
+ nir_builder_instr_insert(&b->nb, &instr->instr);
+
+ return &instr->dest.ssa;
+}
+
static struct vtn_pointer *
vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
struct vtn_pointer *base,
@@ -167,47 +183,61 @@ vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
struct vtn_type *type = base->type;
unsigned idx = 0;
- if (deref_chain->ptr_as_array) {
- /* We need ptr_type for the stride */
- assert(base->ptr_type);
- /* This must be a pointer to an actual element somewhere */
- assert(offset);
- assert(block_index || base->mode == vtn_variable_mode_workgroup);
- /* We need at least one element in the chain */
- assert(deref_chain->length >= 1);
+ if (base->mode == vtn_variable_mode_ubo ||
+ base->mode == vtn_variable_mode_ssbo) {
+ if (!block_index) {
+ assert(base->var && base->type);
+ nir_ssa_def *desc_arr_idx;
+ if (glsl_type_is_array(type->type)) {
+ if (deref_chain->length >= 1) {
+ desc_arr_idx =
+ vtn_access_link_as_ssa(b, deref_chain->link[0], 1);
+ idx++;
+ /* This consumes a level of type */
+ type = type->array_element;
+ } else {
+ /* This is annoying. We've been asked for a pointer to the
+ * array of UBOs/SSBOs and not a specific buffer. Return a
+ * pointer with a descriptor index of 0 and we'll have to do
+ * a reindex later to adjust it to the right thing.
+ */
+ desc_arr_idx = nir_imm_int(&b->nb, 0);
+ }
+ } else if (deref_chain->ptr_as_array) {
+ /* You can't have a zero-length OpPtrAccessChain */
+ assert(deref_chain->length >= 1);
+ desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[0], 1);
+ } else {
+ /* We have a regular non-array SSBO. */
+ desc_arr_idx = NULL;
+ }
+ block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
+ } else if (deref_chain->ptr_as_array &&
+ type->base_type == vtn_base_type_struct && type->block) {
+ /* We are doing an OpPtrAccessChain on a pointer to a struct
+ * decorated block. Do a reindex to add the first link in the
+ * OpPtrAccessChain to the index we received.
+ */
+ assert(deref_chain->length >= 1);
+ nir_ssa_def *offset_index =
+ vtn_access_link_as_ssa(b, deref_chain->link[0], 1);
+ idx++;
- nir_ssa_def *elem_offset =
- vtn_access_link_as_ssa(b, deref_chain->link[idx],
- base->ptr_type->stride);
- offset = nir_iadd(&b->nb, offset, elem_offset);
- idx++;
+ block_index = vtn_resource_reindex(b, block_index, offset_index);
+ }
}
if (!offset) {
- /* This is the first access chain so we don't have a block index */
- assert(!block_index);
+ if (base->mode == vtn_variable_mode_workgroup) {
+ /* SLM doesn't need nor have a block index */
+ assert(!block_index);
- assert(base->var);
- assert(base->ptr_type);
- switch (base->mode) {
- case vtn_variable_mode_ubo:
- case vtn_variable_mode_ssbo:
- if (glsl_type_is_array(type->type)) {
- /* We need at least one element in the chain */
- assert(deref_chain->length >= 1);
+ /* We need the variable for the base offset */
+ assert(base->var);
- nir_ssa_def *desc_arr_idx =
- vtn_access_link_as_ssa(b, deref_chain->link[0], 1);
- block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
- type = type->array_element;
- idx++;
- } else {
- block_index = vtn_variable_resource_index(b, base->var, NULL);
- }
- offset = nir_imm_int(&b->nb, 0);
- break;
+ /* We need ptr_type for size and alignment */
+ assert(base->ptr_type);
- case vtn_variable_mode_workgroup:
/* Assign location on first use so that we don't end up bloating SLM
* address space for variables which are never statically used.
*/
@@ -219,15 +249,29 @@ vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
b->shader->num_shared += base->ptr_type->length;
}
- block_index = NULL;
offset = nir_imm_int(&b->nb, base->var->shared_location);
- break;
+ } else {
+ /* The code above should have ensured a block_index when needed. */
+ assert(block_index);
- default:
- unreachable("Invalid offset pointer mode");
+ /* Start off at the start of the buffer. */
+ offset = nir_imm_int(&b->nb, 0);
}
}
- assert(offset);
+
+ if (deref_chain->ptr_as_array && idx == 0) {
+ /* We need ptr_type for the stride */
+ assert(base->ptr_type);
+
+ /* We need at least one element in the chain */
+ assert(deref_chain->length >= 1);
+
+ nir_ssa_def *elem_offset =
+ vtn_access_link_as_ssa(b, deref_chain->link[idx],
+ base->ptr_type->stride);
+ offset = nir_iadd(&b->nb, offset, elem_offset);
+ idx++;
+ }
for (; idx < deref_chain->length; idx++) {
switch (glsl_get_base_type(type->type)) {
@@ -1535,12 +1579,6 @@ vtn_pointer_to_ssa(struct vtn_builder *b, struct vtn_pointer *ptr)
*/
assert(!ptr->offset && !ptr->block_index);
- /* We can't handle a pointer to an array of descriptors because we have
- * no way of knowing later on that we need to add to update the block
- * index when dereferencing.
- */
- assert(ptr->var && ptr->var->type->base_type == vtn_base_type_struct);
-
struct vtn_access_chain chain = {
.length = 0,
};
--
2.5.0.400.gff86faf
More information about the mesa-dev
mailing list