[Mesa-dev] [PATCH v2] spirv: compute push constant access offset & range
Jason Ekstrand
jason at jlekstrand.net
Wed Jan 4 20:59:47 UTC 2017
On Wed, Jan 4, 2017 at 11:26 AM, Jason Ekstrand <jason at jlekstrand.net>
wrote:
>
>
> On Jan 4, 2017 12:46, "Lionel Landwerlin" <lionel.g.landwerlin at intel.com>
> wrote:
>
> On 04/01/17 18:16, Jason Ekstrand wrote:
>
> On Jan 4, 2017 12:02, "Lionel Landwerlin" <lionel.g.landwerlin at intel.com>
> wrote:
>
> v2: Move the relative push constant offset computation down to
> _vtn_load_store_tail() (Jason)
>
>
> Hm... I may not have been particularly clear. I meant, more or less, to
> have get_io_offset (is that what it's called?) return the offset in terms of
> base, offset, and range like the push constant intrinsic expects and then,
> for everything else, do an add to combine the base and indirect parts.
> Does that make sense? There's even a comment in there about how I was lazy
> and just trusted constant folding before.
>
>
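A rough sketch of that alternative, assuming a hypothetical split-offset representation: the offset-building helper would hand back a constant base plus an optional SSA indirect part, and only push constant loads would consume the base/range directly, while everything else folds the base back in with an iadd. The split_offset struct and helper name below are made up; the nir_* builders are the ones already used in the patch.

struct split_offset {
   unsigned base;           /* compile-time constant part */
   nir_ssa_def *indirect;   /* leftover non-constant part, may be NULL */
};

static void
set_block_load_offset(struct vtn_builder *b, nir_intrinsic_instr *instr,
                      unsigned src_idx, struct split_offset off,
                      unsigned range)
{
   nir_ssa_def *indirect =
      off.indirect ? off.indirect : nir_imm_int(&b->nb, 0);

   if (instr->intrinsic == nir_intrinsic_load_push_constant) {
      /* load_push_constant takes the constant part as base/range and only
       * the indirect part in its offset source. */
      nir_intrinsic_set_base(instr, off.base);
      nir_intrinsic_set_range(instr, range);
      instr->src[src_idx] = nir_src_for_ssa(indirect);
   } else {
      /* Everything else: fold the base back into the offset and rely on
       * constant folding to clean it up. */
      instr->src[src_idx] =
         nir_src_for_ssa(nir_iadd(&b->nb, nir_imm_int(&b->nb, off.base),
                                  indirect));
   }
}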
> I'm afraid I don't follow :(
> Would you like to get this as a NIR pass?
>
>
> Go ahead and merge this or the previous version with my rb and I'll look
> into it later. I'm not even sure if my idea will be feasible.
>
Yeah, doing things that way is going to be painful... Just merge this
version.
Reviewed-by: Jason Ekstrand <jason at jlekstrand.net>
>
>
> Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin at intel.com>
> ---
> src/compiler/spirv/vtn_variables.c | 78 ++++++++++++++++++++-----
> src/intel/vulkan/anv_nir_lower_push_constants.c | 1 -
> 2 files changed, 65 insertions(+), 14 deletions(-)
>
> diff --git a/src/compiler/spirv/vtn_variables.c b/src/compiler/spirv/vtn_variables.c
> index 040c65061c..e3845365bd 100644
> --- a/src/compiler/spirv/vtn_variables.c
> +++ b/src/compiler/spirv/vtn_variables.c
> @@ -434,8 +434,37 @@ vtn_type_block_size(struct vtn_type *type)
> }
>
> static void
> +vtn_access_chain_get_offset_size(struct vtn_access_chain *chain,
> + unsigned *access_offset,
> + unsigned *access_size)
> +{
> + /* Only valid for push constant accesses for now. */
> + assert(chain->var->mode == vtn_variable_mode_push_constant);
> +
> + struct vtn_type *type = chain->var->type;
> +
> + *access_offset = 0;
> +
> + for (unsigned i = 0; i < chain->length; i++) {
> + if (chain->link[i].mode != vtn_access_mode_literal)
> + break;
> +
> + if (glsl_type_is_struct(type->type)) {
> + *access_offset += type->offsets[chain->link[i].id];
> + type = type->members[chain->link[i].id];
> + } else {
> + *access_offset += type->stride * chain->link[i].id;
> + type = type->array_element;
> + }
> + }
> +
> + *access_size = vtn_type_block_size(type);
> +}
> +
> +static void
> _vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load,
> nir_ssa_def *index, nir_ssa_def *offset,
> + unsigned access_offset, unsigned access_size,
> struct vtn_ssa_value **inout, const struct glsl_type *type)
> {
> nir_intrinsic_instr *instr = nir_intrinsic_instr_create(b->nb.shader, op);
> @@ -447,18 +476,25 @@ _vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load,
> instr->src[src++] = nir_src_for_ssa((*inout)->def);
> }
>
> - /* We set the base and size for push constant load to the entire push
> - * constant block for now.
> - */
> if (op == nir_intrinsic_load_push_constant) {
> - nir_intrinsic_set_base(instr, 0);
> - nir_intrinsic_set_range(instr, 128);
> + assert(access_offset % 4 == 0);
> +
> + nir_intrinsic_set_base(instr, access_offset);
> + nir_intrinsic_set_range(instr, access_size);
> }
>
> if (index)
> instr->src[src++] = nir_src_for_ssa(index);
>
> - instr->src[src++] = nir_src_for_ssa(offset);
> + if (op == nir_intrinsic_load_push_constant) {
> + /* We need to subtract the offset from where the intrinsic will load the
> + * data. */
> + instr->src[src++] =
> + nir_src_for_ssa(nir_isub(&b->nb, offset,
> + nir_imm_int(&b->nb, access_offset)));
> + } else {
> + instr->src[src++] = nir_src_for_ssa(offset);
> + }
>
> if (load) {
> nir_ssa_dest_init(&instr->instr, &instr->dest,
> @@ -476,6 +512,7 @@ _vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load,
> static void
> _vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
> nir_ssa_def *index, nir_ssa_def *offset,
> + unsigned access_offset, unsigned access_size,
> struct vtn_access_chain *chain, unsigned chain_idx,
> struct vtn_type *type, struct vtn_ssa_value **inout)
> {
> @@ -520,6 +557,7 @@ _vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
> nir_iadd(&b->nb, offset,
> nir_imm_int(&b->nb, i * type->stride));
> _vtn_load_store_tail(b, op, load, index, elem_offset,
> + access_offset, access_size,
> &(*inout)->elems[i],
> glsl_vector_type(base_type, vec_width));
> }
> @@ -541,8 +579,9 @@ _vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
> offset = nir_iadd(&b->nb, offset, row_offset);
> if (load)
> *inout = vtn_create_ssa_value(b, glsl_scalar_type(base_type));
> - _vtn_load_store_tail(b, op, load, index, offset, inout,
> - glsl_scalar_type(base_type));
> + _vtn_load_store_tail(b, op, load, index, offset,
> + access_offset, access_size,
> + inout, glsl_scalar_type(base_type));
> } else {
> /* Grabbing a column; picking one element off each row */
> unsigned num_comps = glsl_get_vector_elements(type->type);
> @@ -562,6 +601,7 @@ _vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
> }
> comp = &temp_val;
> _vtn_load_store_tail(b, op, load, index, elem_offset,
> + access_offset, access_size,
> &comp, glsl_scalar_type(base_type));
> comps[i] = comp->def;
> }
> @@ -580,20 +620,25 @@ _vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
> offset = nir_iadd(&b->nb, offset, col_offset);
>
> _vtn_block_load_store(b, op, load, index, offset,
> + access_offset, access_size,
> chain, chain_idx + 1,
> type->array_element, inout);
> }
> } else if (chain == NULL) {
> /* Single whole vector */
> assert(glsl_type_is_vector_or_scalar(type->type));
> - _vtn_load_store_tail(b, op, load, index, offset, inout, type->type);
> + _vtn_load_store_tail(b, op, load, index, offset,
> + access_offset, access_size,
> + inout, type->type);
> } else {
> /* Single component of a vector. Fall through to array case. */
> nir_ssa_def *elem_offset =
> vtn_access_link_as_ssa(b, chain->link[chain_idx], type->stride);
> offset = nir_iadd(&b->nb, offset, elem_offset);
>
> - _vtn_block_load_store(b, op, load, index, offset, NULL, 0,
> + _vtn_block_load_store(b, op, load, index, offset,
> + access_offset, access_size,
> + NULL, 0,
> type->array_element, inout);
> }
> return;
> @@ -603,7 +648,9 @@ _vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
> for (unsigned i = 0; i < elems; i++) {
> nir_ssa_def *elem_off =
> nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, i * type->stride));
> - _vtn_block_load_store(b, op, load, index, elem_off, NULL, 0,
> + _vtn_block_load_store(b, op, load, index, elem_off,
> + access_offset, access_size,
> + NULL, 0,
> type->array_element, &(*inout)->elems[i]);
> }
> return;
> @@ -614,7 +661,9 @@ _vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
> for (unsigned i = 0; i < elems; i++) {
> nir_ssa_def *elem_off =
> nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, type->offsets[i]));
> - _vtn_block_load_store(b, op, load, index, elem_off, NULL, 0,
> + _vtn_block_load_store(b, op, load, index, elem_off,
> + access_offset, access_size,
> + NULL, 0,
> type->members[i], &(*inout)->elems[i]);
> }
> return;
> @@ -629,6 +678,7 @@ static struct vtn_ssa_value *
> vtn_block_load(struct vtn_builder *b, struct vtn_access_chain *src)
> {
> nir_intrinsic_op op;
> + unsigned access_offset = 0, access_size = 0;
> switch (src->var->mode) {
> case vtn_variable_mode_ubo:
> op = nir_intrinsic_load_ubo;
> @@ -638,6 +688,7 @@ vtn_block_load(struct vtn_builder *b, struct vtn_access_chain *src)
> break;
> case vtn_variable_mode_push_constant:
> op = nir_intrinsic_load_push_constant;
> + vtn_access_chain_get_offset_size(src, &access_offset, &access_size);
> break;
> default:
> assert(!"Invalid block variable mode");
> @@ -650,6 +701,7 @@ vtn_block_load(struct vtn_builder *b, struct vtn_access_chain *src)
>
> struct vtn_ssa_value *value = NULL;
> _vtn_block_load_store(b, op, true, index, offset,
> + access_offset, access_size,
> src, chain_idx, type, &value);
> return value;
> }
> @@ -664,7 +716,7 @@ vtn_block_store(struct vtn_builder *b, struct vtn_ssa_value *src,
> offset = vtn_access_chain_to_offset(b, dst, &index, &type, &chain_idx, true);
>
> _vtn_block_load_store(b, nir_intrinsic_store_ssbo, false, index, offset,
> - dst, chain_idx, type, &src);
> + 0, 0, dst, chain_idx, type, &src);
> }
>
> static bool
> diff --git a/src/intel/vulkan/anv_nir_lower_push_constants.c b/src/intel/vulkan/anv_nir_lower_push_constants.c
> index 64c155372f..b66552825b 100644
> --- a/src/intel/vulkan/anv_nir_lower_push_constants.c
> +++ b/src/intel/vulkan/anv_nir_lower_push_constants.c
> @@ -42,7 +42,6 @@ anv_nir_lower_push_constants(nir_shader *shader)
> continue;
>
> assert(intrin->const_index[0] % 4 == 0);
> - assert(intrin->const_index[1] == 128);
>
> /* We just turn them into uniform loads */
> intrin->intrinsic = nir_intrinsic_load_uniform;
> --
> 2.11.0
>
>
>
>
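For reference, a standalone sketch of the computation vtn_access_chain_get_offset_size() performs on the leading literal links of an access chain. The toy_type layout and the example block below are made up for illustration; only the walk itself mirrors the patch.

#include <stdio.h>

struct toy_type {
   int is_struct;
   unsigned stride;                       /* array stride in bytes          */
   unsigned block_size;                   /* std140/std430 size of the type */
   const unsigned *offsets;               /* member offsets, structs only   */
   const struct toy_type *const *members; /* member types, structs only     */
   const struct toy_type *array_element;  /* element type, arrays only      */
};

static void
get_offset_size(const struct toy_type *type,
                const unsigned *literal_links, unsigned num_links,
                unsigned *access_offset, unsigned *access_size)
{
   *access_offset = 0;

   /* Walk the constant part of the access chain, accumulating struct member
    * offsets and array strides, like the new helper in the patch. */
   for (unsigned i = 0; i < num_links; i++) {
      if (type->is_struct) {
         *access_offset += type->offsets[literal_links[i]];
         type = type->members[literal_links[i]];
      } else {
         *access_offset += type->stride * literal_links[i];
         type = type->array_element;
      }
   }

   /* The range is the block size of whatever type the literal links reach. */
   *access_size = type->block_size;
}

int
main(void)
{
   /* layout: struct { vec4 a; float b[4]; } -- accessing .b[2] */
   static const struct toy_type flt  = { 0, 0, 4,  NULL, NULL, NULL };
   static const struct toy_type arr  = { 0, 4, 16, NULL, NULL, &flt };
   static const struct toy_type vec4 = { 0, 0, 16, NULL, NULL, NULL };
   static const unsigned offsets[] = { 0, 16 };
   static const struct toy_type *const members[] = { &vec4, &arr };
   static const struct toy_type blk = { 1, 0, 32, offsets, members, NULL };

   const unsigned links[] = { 1, 2 };   /* member 1 (.b), element 2 */
   unsigned off, size;

   get_offset_size(&blk, links, 2, &off, &size);
   printf("base = %u, range = %u\n", off, size);   /* base = 24, range = 4 */
   return 0;
}

With a fully literal chain this lets nir_intrinsic_load_push_constant advertise exactly which bytes of the push constant block it touches via base/range, instead of the fixed base 0 / range 128 used before the patch.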