[Mesa-dev] [PATCH v2 2/2] radv: implement AMD_gcn_shader extension
Connor Abbott
cwabbott0 at gmail.com
Wed Feb 21 20:13:36 UTC 2018
On Wed, Feb 21, 2018 at 3:03 PM, Daniel Schürmann
<daniel.schuermann at campus.tu-berlin.de> wrote:
>
>> On Wed, Feb 21, 2018 at 1:00 PM, <daniel.schuermann at campus.tu-berlin.de>
>> wrote:
>>>
>>> From: Dave Airlie <airlied at redhat.com>
>>>
>>> Co-authored-by: Daniel Schürmann <daniel.schuermann at campus.tu-berlin.de>
>>> Signed-off-by: Daniel Schürmann <daniel.schuermann at campus.tu-berlin.de>
>>> ---
>>> src/amd/common/ac_llvm_build.c | 3 +-
>>> src/amd/common/ac_nir_to_llvm.c | 39 ++++++++++++++++++++++
>>> src/amd/vulkan/radv_extensions.py | 1 +
>>> src/compiler/nir/meson.build | 1 +
>>> src/compiler/nir/nir_intrinsics.h | 4 +++
>>> src/compiler/spirv/spirv_to_nir.c | 2 ++
>>> src/compiler/spirv/vtn_amd.c | 68 +++++++++++++++++++++++++++++++++++++++
>>> src/compiler/spirv/vtn_private.h | 3 ++
>>> 8 files changed, 120 insertions(+), 1 deletion(-)
>>> create mode 100644 src/compiler/spirv/vtn_amd.c
>>>
>>> diff --git a/src/amd/common/ac_llvm_build.c b/src/amd/common/ac_llvm_build.c
>>> index 15144addb9..3bb74c2b0b 100644
>>> --- a/src/amd/common/ac_llvm_build.c
>>> +++ b/src/amd/common/ac_llvm_build.c
>>> @@ -370,7 +370,8 @@ LLVMValueRef
>>> ac_build_shader_clock(struct ac_llvm_context *ctx)
>>> {
>>> LLVMValueRef tmp = ac_build_intrinsic(ctx, "llvm.readcyclecounter",
>>> - ctx->i64, NULL, 0, 0);
>>> + ctx->i64, NULL, 0, AC_FUNC_ATTR_READONLY);
>>> + ac_build_optimization_barrier(ctx, &tmp);
>>
>> ac_build_optimization_barrier() creates an empty inline asm statement,
>> which doesn't actually do much to prevent code motion beyond the
>> attributes LLVM already adds to llvm.readcyclecounter. It prevents the
>> call from being duplicated, but that's about it, and that isn't useful
>> anyway. We only use it to work around problems with cross-wavefront
>> intrinsics, which don't exist here. You can just drop this hunk.
>
> It also prevents LLVM from eliminating multiple calls to the same
> function, which is the purpose in this case. (It also acts as a limited
> code-motion barrier.)
I think we can fix this by removing the "readonly" attribute from the
ac_build_intrinsic() call instead. LLVM doesn't give you many extra
guarantees with an inline asm block compared to an intrinsic with no
flags set. The only difference, I think, is that noduplicate is implied
for asm calls with the sideeffect bit set, but that's not useful here.
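
To be concrete, I mean something like this (untested sketch -- it's just
the pre-patch code, with no attribute flags and no barrier):

    LLVMValueRef
    ac_build_shader_clock(struct ac_llvm_context *ctx)
    {
            /* No readonly/readnone attribute and no inline asm barrier:
             * LLVM has to assume the call may have side effects, so
             * separate calls aren't merged or hoisted on their own. */
            LLVMValueRef tmp = ac_build_intrinsic(ctx, "llvm.readcyclecounter",
                                                  ctx->i64, NULL, 0, 0);
            return LLVMBuildBitCast(ctx->builder, tmp, ctx->v2i32, "");
    }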
>
>>
>>> return LLVMBuildBitCast(ctx->builder, tmp, ctx->v2i32, "");
>>> }
>>>
>>> diff --git a/src/amd/common/ac_nir_to_llvm.c b/src/amd/common/ac_nir_to_llvm.c
>>> index 2460e105f7..05f28b26a2 100644
>>> --- a/src/amd/common/ac_nir_to_llvm.c
>>> +++ b/src/amd/common/ac_nir_to_llvm.c
>>> @@ -4328,6 +4328,38 @@ load_patch_vertices_in(struct ac_shader_abi *abi)
>>> return LLVMConstInt(ctx->ac.i32, ctx->options->key.tcs.input_vertices, false);
>>> }
>>>
>>> +static LLVMValueRef
>>> +visit_cube_face_index(struct ac_nir_context *ctx,
>>> + nir_intrinsic_instr *instr)
>>> +{
>>> + LLVMValueRef result;
>>> + LLVMValueRef in[3];
>>> + LLVMValueRef src0 = ac_to_float(&ctx->ac, get_src(ctx, instr->src[0]));
>>> + for (unsigned chan = 0; chan < 3; chan++)
>>> + in[chan] = ac_llvm_extract_elem(&ctx->ac, src0, chan);
>>> +
>>> + result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubeid",
>>> + ctx->ac.f32, in, 3, AC_FUNC_ATTR_READNONE);
>>> + return result;
>>> +}
>>> +
>>> +static LLVMValueRef
>>> +visit_cube_face_coord(struct ac_nir_context *ctx,
>>> + nir_intrinsic_instr *instr)
>>> +{
>>> + LLVMValueRef results[2];
>>> + LLVMValueRef in[3];
>>> + LLVMValueRef src0 = ac_to_float(&ctx->ac, get_src(ctx, instr->src[0]));
>>> + for (unsigned chan = 0; chan < 3; chan++)
>>> + in[chan] = ac_llvm_extract_elem(&ctx->ac, src0, chan);
>>> +
>>> + results[0] = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubetc",
>>> + ctx->ac.f32, in, 3, AC_FUNC_ATTR_READNONE);
>>> + results[1] = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.cubesc",
>>> + ctx->ac.f32, in, 3, AC_FUNC_ATTR_READNONE);
>>> + return ac_build_gather_values(&ctx->ac, results, 2);
>>> +}
>>> +
>>> static void visit_intrinsic(struct ac_nir_context *ctx,
>>> nir_intrinsic_instr *instr)
>>> {
>>> @@ -4613,6 +4645,13 @@ static void visit_intrinsic(struct ac_nir_context *ctx,
>>> result = LLVMBuildSExt(ctx->ac.builder, tmp, ctx->ac.i32, "");
>>> break;
>>> }
>>> + case nir_intrinsic_cube_face_index:
>>> + result = visit_cube_face_index(ctx, instr);
>>> + break;
>>> + case nir_intrinsic_cube_face_coord:
>>> + result = visit_cube_face_coord(ctx, instr);
>>> + break;
>>> +
>>> default:
>>> fprintf(stderr, "Unknown intrinsic: ");
>>> nir_print_instr(&instr->instr, stderr);
>>> diff --git a/src/amd/vulkan/radv_extensions.py b/src/amd/vulkan/radv_extensions.py
>>> index d761895d3a..a63e01faae 100644
>>> --- a/src/amd/vulkan/radv_extensions.py
>>> +++ b/src/amd/vulkan/radv_extensions.py
>>> @@ -88,6 +88,7 @@ EXTENSIONS = [
>>> Extension('VK_EXT_external_memory_host', 1, 'device->rad_info.has_userptr'),
>>> Extension('VK_EXT_global_priority', 1, 'device->rad_info.has_ctx_priority'),
>>> Extension('VK_AMD_draw_indirect_count', 1, True),
>>> + Extension('VK_AMD_gcn_shader', 1, True),
>>> Extension('VK_AMD_rasterization_order', 1, 'device->rad_info.chip_class >= VI && device->rad_info.max_se >= 2'),
>>> Extension('VK_AMD_shader_info', 1, True),
>>> ]
>>> diff --git a/src/compiler/nir/meson.build b/src/compiler/nir/meson.build
>>> index 859a0c1e62..e0011a4dc0 100644
>>> --- a/src/compiler/nir/meson.build
>>> +++ b/src/compiler/nir/meson.build
>>> @@ -189,6 +189,7 @@ files_libnir = files(
>>> '../spirv/spirv_info.h',
>>> '../spirv/spirv_to_nir.c',
>>> '../spirv/vtn_alu.c',
>>> + '../spirv/vtn_amd.c',
>>> '../spirv/vtn_cfg.c',
>>> '../spirv/vtn_glsl450.c',
>>> '../spirv/vtn_private.h',
>>> diff --git a/src/compiler/nir/nir_intrinsics.h b/src/compiler/nir/nir_intrinsics.h
>>> index ede2927787..aefb8a3626 100644
>>> --- a/src/compiler/nir/nir_intrinsics.h
>>> +++ b/src/compiler/nir/nir_intrinsics.h
>>> @@ -318,6 +318,10 @@ INTRINSIC(shared_atomic_xor, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
>>> INTRINSIC(shared_atomic_exchange, 2, ARR(1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
>>> INTRINSIC(shared_atomic_comp_swap, 3, ARR(1, 1, 1), true, 1, 0, 1, BASE, xx, xx, 0)
>>>
>>> +/* AMD_gcn_shader intrinsics */
>>> +INTRINSIC(cube_face_coord, 1, ARR(3), true, 2, 0, 0, xx, xx, xx, 0)
>>> +INTRINSIC(cube_face_index, 1, ARR(3), true, 1, 0, 0, xx, xx, xx, 0)
>>> +
>>> /* Used by nir_builder.h to generate loader helpers for the system values. */
>>> #ifndef DEFINE_SYSTEM_VALUE
>>> #define DEFINE_SYSTEM_VALUE(name)
>>> diff --git a/src/compiler/spirv/spirv_to_nir.c b/src/compiler/spirv/spirv_to_nir.c
>>> index c6df764682..40a52da13b 100644
>>> --- a/src/compiler/spirv/spirv_to_nir.c
>>> +++ b/src/compiler/spirv/spirv_to_nir.c
>>> @@ -373,6 +373,8 @@ vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
>>> struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
>>> if (strcmp((const char *)&w[2], "GLSL.std.450") == 0) {
>>> val->ext_handler = vtn_handle_glsl450_instruction;
>>> + } else if (strcmp((const char *)&w[2], "SPV_AMD_gcn_shader") == 0) {
>>> + val->ext_handler = vtn_handle_amd_gcn_shader_instruction;
>>> } else {
>>> vtn_fail("Unsupported extension");
>>> }
>>> diff --git a/src/compiler/spirv/vtn_amd.c b/src/compiler/spirv/vtn_amd.c
>>> new file mode 100644
>>> index 0000000000..82a90ff269
>>> --- /dev/null
>>> +++ b/src/compiler/spirv/vtn_amd.c
>>> @@ -0,0 +1,68 @@
>>> +/*
>>> + * Copyright © 2017 Valve Corporation
>>> + * Copyright © 2017 Red Hat
>>> + *
>>> + * Permission is hereby granted, free of charge, to any person obtaining a
>>> + * copy of this software and associated documentation files (the "Software"),
>>> + * to deal in the Software without restriction, including without limitation
>>> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
>>> + * and/or sell copies of the Software, and to permit persons to whom the
>>> + * Software is furnished to do so, subject to the following conditions:
>>> + *
>>> + * The above copyright notice and this permission notice (including the next
>>> + * paragraph) shall be included in all copies or substantial portions of the
>>> + * Software.
>>> + *
>>> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
>>> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
>>> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
>>> + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
>>> + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
>>> + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
>>> + * IN THE SOFTWARE.
>>> + *
>>> + */
>>> +
>>> +#include "vtn_private.h"
>>> +#include "GLSL.ext.AMD.h"
>>> +
>>> +bool
>>> +vtn_handle_amd_gcn_shader_instruction(struct vtn_builder *b, uint32_t ext_opcode,
>>> + const uint32_t *w, unsigned count)
>>> +{
>>> + nir_intrinsic_op op;
>>> + const struct glsl_type *dest_type =
>>> + vtn_value(b, w[1], vtn_value_type_type)->type->type;
>>> +
>>> + switch ((enum GcnShaderAMD)ext_opcode) {
>>> + case CubeFaceIndexAMD:
>>> + op = nir_intrinsic_cube_face_index;
>>> + break;
>>> + case CubeFaceCoordAMD:
>>> + op = nir_intrinsic_cube_face_coord;
>>> + break;
>>> + case TimeAMD:
>>> + op = nir_intrinsic_shader_clock;
>>> + break;
>>> + default:
>>> + unreachable("Invalid opcode");
>>> + }
>>> +
>>> + struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
>>> + nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->nb.shader, op);
>>> +
>>> + if (ext_opcode != TimeAMD) {
>>> + intrin->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[5])->def);
>>> + nir_ssa_dest_init(&intrin->instr, &intrin->dest, glsl_get_vector_elements(dest_type),
>>> + glsl_get_bit_size(dest_type), NULL);
>>> + nir_builder_instr_insert(&b->nb, &intrin->instr);
>>> + val->ssa = vtn_create_ssa_value(b, dest_type);
>>> + val->ssa->def = &intrin->dest.ssa;
>>> + } else {
>>> + nir_ssa_dest_init(&intrin->instr, &intrin->dest, 2, 32, NULL);
>>> + nir_builder_instr_insert(&b->nb, &intrin->instr);
>>> + val->ssa = vtn_create_ssa_value(b, dest_type);
>>> + val->ssa->def = nir_build_alu(&b->nb, nir_op_pack_64_2x32, &intrin->dest.ssa, NULL, NULL, NULL);
>>
>> You can use the autogenerated nir_pack_64_2x32() function here
>> directly, rather than using nir_build_alu().
>
> Thx
>
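
To be concrete about the nir_pack_64_2x32() suggestion, the TimeAMD
path could then look roughly like this (untested sketch):

    } else {
            nir_ssa_dest_init(&intrin->instr, &intrin->dest, 2, 32, NULL);
            nir_builder_instr_insert(&b->nb, &intrin->instr);
            val->ssa = vtn_create_ssa_value(b, dest_type);
            /* nir_pack_64_2x32() is the autogenerated nir_builder helper
             * for nir_op_pack_64_2x32, so nir_build_alu() isn't needed. */
            val->ssa->def = nir_pack_64_2x32(&b->nb, &intrin->dest.ssa);
    }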
>>
>>> + }
>>> + return true;
>>> +}
>>> diff --git a/src/compiler/spirv/vtn_private.h b/src/compiler/spirv/vtn_private.h
>>> index 3e49df4dac..a703d45b18 100644
>>> --- a/src/compiler/spirv/vtn_private.h
>>> +++ b/src/compiler/spirv/vtn_private.h
>>> @@ -723,4 +723,7 @@ vtn_u64_literal(const uint32_t *w)
>>> return (uint64_t)w[1] << 32 | w[0];
>>> }
>>>
>>> +bool vtn_handle_amd_gcn_shader_instruction(struct vtn_builder *b, uint32_t ext_opcode,
>>> + const uint32_t *words, unsigned count);
>>> +
>>> #endif /* _VTN_PRIVATE_H_ */
>>> --
>>> 2.14.1
>>>
>