[Mesa-dev] [PATCH 6/6] radeonsi: rename prefixes from radeon to si
Nicolai Hähnle
nhaehnle at gmail.com
Tue Oct 18 09:54:44 UTC 2016
Makes sense as a cleanup. At some point it would make sense to look into
sharing some stuff with radv instead. There's probably not a huge amount
because of the NIR/TGSI split, but still.
Patches 1 & 4:
Acked-by: Nicolai Hähnle <nicolai.haehnle at amd.com>
Patches 2, 3, 5, 6:
Reviewed-by: Nicolai Hähnle <nicolai.haehnle at amd.com>
On 17.10.2016 15:44, Marek Olšák wrote:
> From: Marek Olšák <marek.olsak at amd.com>
>
> ---
> src/gallium/drivers/radeonsi/si_pipe.c | 2 +-
> src/gallium/drivers/radeonsi/si_shader.c | 96 ++++++-------
> src/gallium/drivers/radeonsi/si_shader_internal.h | 70 +++++-----
> .../drivers/radeonsi/si_shader_tgsi_setup.c | 150 ++++++++++-----------
> 4 files changed, 159 insertions(+), 159 deletions(-)
>
> diff --git a/src/gallium/drivers/radeonsi/si_pipe.c b/src/gallium/drivers/radeonsi/si_pipe.c
> index 7924375..a9faa75 100644
> --- a/src/gallium/drivers/radeonsi/si_pipe.c
> +++ b/src/gallium/drivers/radeonsi/si_pipe.c
> @@ -119,21 +119,21 @@ static void si_emit_string_marker(struct pipe_context *ctx,
> struct si_context *sctx = (struct si_context *)ctx;
>
> dd_parse_apitrace_marker(string, len, &sctx->apitrace_call_number);
> }
>
> static LLVMTargetMachineRef
> si_create_llvm_target_machine(struct si_screen *sscreen)
> {
> const char *triple = "amdgcn--";
>
> - return LLVMCreateTargetMachine(radeon_llvm_get_r600_target(triple), triple,
> + return LLVMCreateTargetMachine(si_llvm_get_amdgpu_target(triple), triple,
> r600_get_llvm_processor_name(sscreen->b.family),
> #if HAVE_LLVM >= 0x0308
> sscreen->b.debug_flags & DBG_SI_SCHED ?
> SI_LLVM_DEFAULT_FEATURES ",+si-scheduler" :
> #endif
> SI_LLVM_DEFAULT_FEATURES,
> LLVMCodeGenLevelDefault,
> LLVMRelocDefault,
> LLVMCodeModelDefault);
> }
> diff --git a/src/gallium/drivers/radeonsi/si_shader.c b/src/gallium/drivers/radeonsi/si_shader.c
> index bca07ac..cbf2090 100644
> --- a/src/gallium/drivers/radeonsi/si_shader.c
> +++ b/src/gallium/drivers/radeonsi/si_shader.c
> @@ -478,21 +478,21 @@ static LLVMValueRef get_bounded_indirect_index(struct si_shader_context *ctx,
> {
> LLVMValueRef result = get_indirect_index(ctx, ind, rel_index);
>
> /* LLVM 3.8: If indirect resource indexing is used:
> * - SI & CIK hang
> * - VI crashes
> */
> if (HAVE_LLVM <= 0x0308)
> return LLVMGetUndef(ctx->i32);
>
> - return radeon_llvm_bound_index(ctx, result, num);
> + return si_llvm_bound_index(ctx, result, num);
> }
>
>
> /**
> * Calculate a dword address given an input or output register and a stride.
> */
> static LLVMValueRef get_dw_address(struct si_shader_context *ctx,
> const struct tgsi_full_dst_register *dst,
> const struct tgsi_full_src_register *src,
> LLVMValueRef vertex_dw_stride,
> @@ -869,21 +869,21 @@ static LLVMValueRef buffer_load(struct lp_build_tgsi_context *bld_base,
> return LLVMBuildExtractElement(gallivm->builder, value,
> lp_build_const_int32(gallivm, swizzle), "");
> }
>
> value = build_buffer_load(ctx, buffer, 1, NULL, base, offset,
> swizzle * 4, 1, 0);
>
> value2 = build_buffer_load(ctx, buffer, 1, NULL, base, offset,
> swizzle * 4 + 4, 1, 0);
>
> - return radeon_llvm_emit_fetch_64bit(bld_base, type, value, value2);
> + return si_llvm_emit_fetch_64bit(bld_base, type, value, value2);
> }
>
> /**
> * Load from LDS.
> *
> * \param type output value type
> * \param swizzle offset (typically 0..3); it can be ~0, which loads a vec4
> * \param dw_addr address in dwords
> */
> static LLVMValueRef lds_load(struct lp_build_tgsi_context *bld_base,
> @@ -906,21 +906,21 @@ static LLVMValueRef lds_load(struct lp_build_tgsi_context *bld_base,
>
> dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
> lp_build_const_int32(gallivm, swizzle));
>
> value = build_indexed_load(ctx, ctx->lds, dw_addr, false);
> if (tgsi_type_is_64bit(type)) {
> LLVMValueRef value2;
> dw_addr = lp_build_add(&bld_base->uint_bld, dw_addr,
> lp_build_const_int32(gallivm, swizzle + 1));
> value2 = build_indexed_load(ctx, ctx->lds, dw_addr, false);
> - return radeon_llvm_emit_fetch_64bit(bld_base, type, value, value2);
> + return si_llvm_emit_fetch_64bit(bld_base, type, value, value2);
> }
>
> return LLVMBuildBitCast(gallivm->builder, value,
> tgsi2llvmtype(bld_base, type), "");
> }
>
> /**
> * Store to LDS.
> *
> * \param swizzle offset (typically 0..3)
> @@ -1008,21 +1008,21 @@ static void store_output_tcs(struct lp_build_tgsi_context *bld_base,
> unsigned chan_index;
> LLVMValueRef dw_addr, stride;
> LLVMValueRef rw_buffers, buffer, base, buf_addr;
> LLVMValueRef values[4];
>
> /* Only handle per-patch and per-vertex outputs here.
> * Vectors will be lowered to scalars and this function will be called again.
> */
> if (reg->Register.File != TGSI_FILE_OUTPUT ||
> (dst[0] && LLVMGetTypeKind(LLVMTypeOf(dst[0])) == LLVMVectorTypeKind)) {
> - radeon_llvm_emit_store(bld_base, inst, info, dst);
> + si_llvm_emit_store(bld_base, inst, info, dst);
> return;
> }
>
> if (reg->Register.Dimension) {
> stride = unpack_param(ctx, SI_PARAM_TCS_OUT_LAYOUT, 13, 8);
> dw_addr = get_tcs_out_current_patch_offset(ctx);
> dw_addr = get_dw_address(ctx, reg, NULL, stride, dw_addr);
> } else {
> dw_addr = get_tcs_out_current_patch_data_offset(ctx);
> dw_addr = get_dw_address(ctx, reg, NULL, NULL, dw_addr);
> @@ -1034,21 +1034,21 @@ static void store_output_tcs(struct lp_build_tgsi_context *bld_base,
> lp_build_const_int32(gallivm, SI_HS_RING_TESS_OFFCHIP));
>
> base = LLVMGetParam(ctx->main_fn, ctx->param_oc_lds);
> buf_addr = get_tcs_tes_buffer_address_from_reg(ctx, reg, NULL);
>
>
> TGSI_FOR_EACH_DST0_ENABLED_CHANNEL(inst, chan_index) {
> LLVMValueRef value = dst[chan_index];
>
> if (inst->Instruction.Saturate)
> - value = radeon_llvm_saturate(bld_base, value);
> + value = si_llvm_saturate(bld_base, value);
>
> lds_store(bld_base, chan_index, dw_addr, value);
>
> value = LLVMBuildBitCast(gallivm->builder, value, ctx->i32, "");
> values[chan_index] = value;
>
> if (inst->Dst[0].Register.WriteMask != 0xF) {
> build_tbuffer_store_dwords(ctx, buffer, value, 1,
> buf_addr, base,
> 4 * chan_index);
> @@ -1127,22 +1127,22 @@ static LLVMValueRef fetch_input_gs(
> "llvm.SI.buffer.load.dword.i32.i32",
> ctx->i32, args, 9,
> LLVMReadOnlyAttribute);
> if (tgsi_type_is_64bit(type)) {
> LLVMValueRef value2;
> args[2] = lp_build_const_int32(gallivm, (param * 4 + swizzle + 1) * 256);
> value2 = lp_build_intrinsic(gallivm->builder,
> "llvm.SI.buffer.load.dword.i32.i32",
> ctx->i32, args, 9,
> LLVMReadOnlyAttribute);
> - return radeon_llvm_emit_fetch_64bit(bld_base, type,
> - value, value2);
> + return si_llvm_emit_fetch_64bit(bld_base, type,
> + value, value2);
> }
> return LLVMBuildBitCast(gallivm->builder,
> value,
> tgsi2llvmtype(bld_base, type), "");
> }
>
> static int lookup_interp_param_index(unsigned interpolate, unsigned location)
> {
> switch (interpolate) {
> case TGSI_INTERPOLATE_CONSTANT:
> @@ -1808,22 +1808,22 @@ static LLVMValueRef fetch_constant(
> desc = load_const_buffer_desc(ctx, buf);
> c0 = buffer_load_const(ctx, desc,
> LLVMConstInt(ctx->i32, idx * 4, 0));
>
> if (!tgsi_type_is_64bit(type))
> return bitcast(bld_base, type, c0);
> else {
> c1 = buffer_load_const(ctx, desc,
> LLVMConstInt(ctx->i32,
> (idx + 1) * 4, 0));
> - return radeon_llvm_emit_fetch_64bit(bld_base, type,
> - c0, c1);
> + return si_llvm_emit_fetch_64bit(bld_base, type,
> + c0, c1);
> }
> }
>
> if (reg->Register.Dimension && reg->Dimension.Indirect) {
> LLVMValueRef ptr = LLVMGetParam(ctx->main_fn, SI_PARAM_CONST_BUFFERS);
> LLVMValueRef index;
> index = get_bounded_indirect_index(ctx, &reg->DimIndirect,
> reg->Dimension.Index,
> SI_NUM_CONST_BUFFERS);
> bufp = build_indexed_load_const(ctx, ptr, index);
> @@ -1843,22 +1843,22 @@ static LLVMValueRef fetch_constant(
> else {
> LLVMValueRef addr2, result2;
> addr2 = ctx->soa.addr[ireg->Index][ireg->Swizzle + 1];
> addr2 = LLVMBuildLoad(base->gallivm->builder, addr2, "load addr reg2");
> addr2 = lp_build_mul_imm(&bld_base->uint_bld, addr2, 16);
> addr2 = lp_build_add(&bld_base->uint_bld, addr2,
> lp_build_const_int32(base->gallivm, idx * 4));
>
> result2 = buffer_load_const(ctx, bufp, addr2);
>
> - result = radeon_llvm_emit_fetch_64bit(bld_base, type,
> - result, result2);
> + result = si_llvm_emit_fetch_64bit(bld_base, type,
> + result, result2);
> }
> return result;
> }
>
> /* Upper 16 bits must be zero. */
> static LLVMValueRef si_llvm_pack_two_int16(struct gallivm_state *gallivm,
> LLVMValueRef val[2])
> {
> return LLVMBuildOr(gallivm->builder, val[0],
> LLVMBuildShl(gallivm->builder, val[1],
> @@ -1961,21 +1961,21 @@ static void si_llvm_init_export_args(struct lp_build_tgsi_context *bld_base,
> ctx->i32, pack_args, 2,
> LLVMReadNoneAttribute);
> args[chan + 5] =
> LLVMBuildBitCast(base->gallivm->builder,
> packed, ctx->f32, "");
> }
> break;
>
> case V_028714_SPI_SHADER_UNORM16_ABGR:
> for (chan = 0; chan < 4; chan++) {
> - val[chan] = radeon_llvm_saturate(bld_base, values[chan]);
> + val[chan] = si_llvm_saturate(bld_base, values[chan]);
> val[chan] = LLVMBuildFMul(builder, val[chan],
> lp_build_const_float(gallivm, 65535), "");
> val[chan] = LLVMBuildFAdd(builder, val[chan],
> lp_build_const_float(gallivm, 0.5), "");
> val[chan] = LLVMBuildFPToUI(builder, val[chan],
> ctx->i32, "");
> }
>
> args[4] = uint->one; /* COMPR flag */
> args[5] = bitcast(bld_base, TGSI_TYPE_FLOAT,
> @@ -2806,21 +2806,21 @@ static void si_llvm_emit_vs_epilogue(struct lp_build_tgsi_context *bld_base)
> cond = LLVMGetParam(ctx->main_fn,
> SI_PARAM_VS_STATE_BITS);
> cond = LLVMBuildTrunc(gallivm->builder, cond,
> ctx->i1, "");
> lp_build_if(&if_ctx, gallivm, cond);
> }
>
> for (j = 0; j < 4; j++) {
> addr = ctx->soa.outputs[i][j];
> val = LLVMBuildLoad(gallivm->builder, addr, "");
> - val = radeon_llvm_saturate(bld_base, val);
> + val = si_llvm_saturate(bld_base, val);
> LLVMBuildStore(gallivm->builder, val, addr);
> }
> }
>
> if (cond)
> lp_build_endif(&if_ctx);
> }
>
> for (i = 0; i < info->num_outputs; i++) {
> outputs[i].name = info->output_semantic_name[i];
> @@ -2959,21 +2959,21 @@ static void si_export_mrt_color(struct lp_build_tgsi_context *bld_base,
> unsigned samplemask_param,
> bool is_last, struct si_ps_exports *exp)
> {
> struct si_shader_context *ctx = si_shader_context(bld_base);
> struct lp_build_context *base = &bld_base->base;
> int i;
>
> /* Clamp color */
> if (ctx->shader->key.ps.epilog.clamp_color)
> for (i = 0; i < 4; i++)
> - color[i] = radeon_llvm_saturate(bld_base, color[i]);
> + color[i] = si_llvm_saturate(bld_base, color[i]);
>
> /* Alpha to one */
> if (ctx->shader->key.ps.epilog.alpha_to_one)
> color[3] = base->one;
>
> /* Alpha test */
> if (index == 0 &&
> ctx->shader->key.ps.epilog.alpha_func != PIPE_FUNC_ALWAYS)
> si_alpha_test(bld_base, color[3]);
>
> @@ -4545,21 +4545,21 @@ static void tex_fetch_args(
> }
>
> /* TC-compatible HTILE promotes Z16 and Z24 to Z32_FLOAT,
> * so the depth comparison value isn't clamped for Z16 and
> * Z24 anymore. Do it manually here.
> *
> * It's unnecessary if the original texture format was
> * Z32_FLOAT, but we don't know that here.
> */
> if (ctx->screen->b.chip_class == VI)
> - z = radeon_llvm_saturate(bld_base, z);
> + z = si_llvm_saturate(bld_base, z);
>
> address[count++] = z;
> }
>
> /* Pack user derivatives */
> if (opcode == TGSI_OPCODE_TXD) {
> int param, num_src_deriv_channels;
>
> switch (target) {
> case TGSI_TEXTURE_3D:
> @@ -5381,23 +5381,23 @@ static const struct lp_build_tgsi_action interp_action = {
> .emit = build_interp_intrinsic,
> };
>
> static void si_create_function(struct si_shader_context *ctx,
> LLVMTypeRef *returns, unsigned num_returns,
> LLVMTypeRef *params, unsigned num_params,
> int last_sgpr)
> {
> int i;
>
> - radeon_llvm_create_func(ctx, returns, num_returns,
> - params, num_params);
> - radeon_llvm_shader_type(ctx->main_fn, ctx->type);
> + si_llvm_create_func(ctx, returns, num_returns,
> + params, num_params);
> + si_llvm_shader_type(ctx->main_fn, ctx->type);
> ctx->return_value = LLVMGetUndef(ctx->return_type);
>
> for (i = 0; i <= last_sgpr; ++i) {
> LLVMValueRef P = LLVMGetParam(ctx->main_fn, i);
>
> /* The combination of:
> * - ByVal
> * - dereferenceable
> * - invariant.load
> * allows the optimization passes to move loads and reduces
> @@ -5705,47 +5705,47 @@ static void create_function(struct si_shader_context *ctx)
> }
>
> assert(num_params <= ARRAY_SIZE(params));
>
> si_create_function(ctx, returns, num_returns, params,
> num_params, last_sgpr);
>
> /* Reserve register locations for VGPR inputs the PS prolog may need. */
> if (ctx->type == PIPE_SHADER_FRAGMENT &&
> !ctx->is_monolithic) {
> - radeon_llvm_add_attribute(ctx->main_fn,
> - "InitialPSInputAddr",
> - S_0286D0_PERSP_SAMPLE_ENA(1) |
> - S_0286D0_PERSP_CENTER_ENA(1) |
> - S_0286D0_PERSP_CENTROID_ENA(1) |
> - S_0286D0_LINEAR_SAMPLE_ENA(1) |
> - S_0286D0_LINEAR_CENTER_ENA(1) |
> - S_0286D0_LINEAR_CENTROID_ENA(1) |
> - S_0286D0_FRONT_FACE_ENA(1) |
> - S_0286D0_POS_FIXED_PT_ENA(1));
> + si_llvm_add_attribute(ctx->main_fn,
> + "InitialPSInputAddr",
> + S_0286D0_PERSP_SAMPLE_ENA(1) |
> + S_0286D0_PERSP_CENTER_ENA(1) |
> + S_0286D0_PERSP_CENTROID_ENA(1) |
> + S_0286D0_LINEAR_SAMPLE_ENA(1) |
> + S_0286D0_LINEAR_CENTER_ENA(1) |
> + S_0286D0_LINEAR_CENTROID_ENA(1) |
> + S_0286D0_FRONT_FACE_ENA(1) |
> + S_0286D0_POS_FIXED_PT_ENA(1));
> } else if (ctx->type == PIPE_SHADER_COMPUTE) {
> const unsigned *properties = shader->selector->info.properties;
> unsigned max_work_group_size =
> properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] *
> properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] *
> properties[TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH];
>
> if (!max_work_group_size) {
> /* This is a variable group size compute shader,
> * compile it for the maximum possible group size.
> */
> max_work_group_size = SI_MAX_VARIABLE_THREADS_PER_BLOCK;
> }
>
> - radeon_llvm_add_attribute(ctx->main_fn,
> - "amdgpu-max-work-group-size",
> - max_work_group_size);
> + si_llvm_add_attribute(ctx->main_fn,
> + "amdgpu-max-work-group-size",
> + max_work_group_size);
> }
>
> shader->info.num_input_sgprs = 0;
> shader->info.num_input_vgprs = 0;
>
> for (i = 0; i <= last_sgpr; ++i)
> shader->info.num_input_sgprs += llvm_get_type_size(params[i]) / 4;
>
> /* Unused fragment shader inputs are eliminated by the compiler,
> * so we don't know yet how many there will be.
> @@ -6240,21 +6240,21 @@ int si_compile_llvm(struct si_screen *sscreen,
> }
> }
>
> if (sscreen->record_llvm_ir) {
> char *ir = LLVMPrintModuleToString(mod);
> binary->llvm_ir_string = strdup(ir);
> LLVMDisposeMessage(ir);
> }
>
> if (!si_replace_shader(count, binary)) {
> - r = radeon_llvm_compile(mod, binary, tm, debug);
> + r = si_llvm_compile(mod, binary, tm, debug);
> if (r)
> return r;
> }
>
> si_shader_binary_read_config(binary, conf, 0);
>
> /* Enable 64-bit and 16-bit denormals, because there is no performance
> * cost.
> *
> * If denormals are enabled, all floating-point output modifiers are
> @@ -6356,37 +6356,37 @@ static int si_generate_gs_copy_shader(struct si_screen *sscreen,
>
> si_llvm_export_vs(bld_base, outputs, gsinfo->num_outputs);
>
> LLVMBuildRetVoid(gallivm->builder);
>
> /* Dump LLVM IR before any optimization passes */
> if (sscreen->b.debug_flags & DBG_PREOPT_IR &&
> r600_can_dump_shader(&sscreen->b, PIPE_SHADER_GEOMETRY))
> LLVMDumpModule(bld_base->base.gallivm->module);
>
> - radeon_llvm_finalize_module(ctx,
> + si_llvm_finalize_module(ctx,
> r600_extra_shader_checks(&sscreen->b, PIPE_SHADER_GEOMETRY));
>
> r = si_compile_llvm(sscreen, &ctx->shader->binary,
> &ctx->shader->config, ctx->tm,
> bld_base->base.gallivm->module,
> debug, PIPE_SHADER_GEOMETRY,
> "GS Copy Shader");
> if (!r) {
> if (r600_can_dump_shader(&sscreen->b, PIPE_SHADER_GEOMETRY))
> fprintf(stderr, "GS Copy Shader:\n");
> si_shader_dump(sscreen, ctx->shader, debug,
> PIPE_SHADER_GEOMETRY, stderr);
> r = si_shader_binary_upload(sscreen, ctx->shader);
> }
>
> - radeon_llvm_dispose(ctx);
> + si_llvm_dispose(ctx);
>
> FREE(outputs);
> return r;
> }
>
> static void si_dump_shader_key(unsigned shader, union si_shader_key *key,
> FILE *f)
> {
> int i;
>
> @@ -6443,21 +6443,21 @@ static void si_dump_shader_key(unsigned shader, union si_shader_key *key,
>
> static void si_init_shader_ctx(struct si_shader_context *ctx,
> struct si_screen *sscreen,
> struct si_shader *shader,
> LLVMTargetMachineRef tm)
> {
> struct lp_build_tgsi_context *bld_base;
> struct lp_build_tgsi_action tmpl = {};
>
> memset(ctx, 0, sizeof(*ctx));
> - radeon_llvm_context_init(
> + si_llvm_context_init(
> ctx, "amdgcn--",
> (shader && shader->selector) ? &shader->selector->info : NULL,
> (shader && shader->selector) ? shader->selector->tokens : NULL);
> si_shader_context_init_alu(&ctx->soa.bld_base);
> ctx->tm = tm;
> ctx->screen = sscreen;
> if (shader && shader->selector)
> ctx->type = shader->selector->info.processor;
> else
> ctx->type = -1;
> @@ -6637,31 +6637,31 @@ int si_compile_tgsi_shader(struct si_screen *sscreen,
> }
>
> si_llvm_build_ret(&ctx, ctx.return_value);
> mod = bld_base->base.gallivm->module;
>
> /* Dump LLVM IR before any optimization passes */
> if (sscreen->b.debug_flags & DBG_PREOPT_IR &&
> r600_can_dump_shader(&sscreen->b, ctx.type))
> LLVMDumpModule(mod);
>
> - radeon_llvm_finalize_module(&ctx,
> + si_llvm_finalize_module(&ctx,
> r600_extra_shader_checks(&sscreen->b, ctx.type));
>
> r = si_compile_llvm(sscreen, &shader->binary, &shader->config, tm,
> mod, debug, ctx.type, "TGSI shader");
> if (r) {
> fprintf(stderr, "LLVM failed to compile shader\n");
> goto out;
> }
>
> - radeon_llvm_dispose(&ctx);
> + si_llvm_dispose(&ctx);
>
> /* Validate SGPR and VGPR usage for compute to detect compiler bugs.
> * LLVM 3.9svn has this bug.
> */
> if (sel->type == PIPE_SHADER_COMPUTE) {
> unsigned *props = sel->info.properties;
> unsigned wave_size = 64;
> unsigned max_vgprs = 256;
> unsigned max_sgprs = sscreen->b.chip_class >= VI ? 800 : 512;
> unsigned max_sgprs_per_wave = 128;
> @@ -6904,29 +6904,29 @@ static bool si_compile_vs_prolog(struct si_screen *sscreen,
> LLVMGetParam(func, SI_SGPR_BASE_VERTEX), "");
> }
>
> index = LLVMBuildBitCast(gallivm->builder, index, ctx.f32, "");
> ret = LLVMBuildInsertValue(gallivm->builder, ret, index,
> num_params++, "");
> }
>
> /* Compile. */
> si_llvm_build_ret(&ctx, ret);
> - radeon_llvm_finalize_module(&ctx,
> + si_llvm_finalize_module(&ctx,
> r600_extra_shader_checks(&sscreen->b, PIPE_SHADER_VERTEX));
>
> if (si_compile_llvm(sscreen, &out->binary, &out->config, tm,
> gallivm->module, debug, ctx.type,
> "Vertex Shader Prolog"))
> status = false;
>
> - radeon_llvm_dispose(&ctx);
> + si_llvm_dispose(&ctx);
> return status;
> }
>
> /**
> * Compile the vertex shader epilog. This is also used by the tessellation
> * evaluation shader compiled as VS.
> *
> * The input is PrimitiveID.
> *
> * If PrimitiveID is required by the pixel shader, export it.
> @@ -6977,29 +6977,29 @@ static bool si_compile_vs_epilog(struct si_screen *sscreen,
> args[7] = uint->undef; /* Z */
> args[8] = uint->undef; /* W */
>
> lp_build_intrinsic(base->gallivm->builder, "llvm.SI.export",
> LLVMVoidTypeInContext(base->gallivm->context),
> args, 9, 0);
> }
>
> /* Compile. */
> LLVMBuildRetVoid(gallivm->builder);
> - radeon_llvm_finalize_module(&ctx,
> + si_llvm_finalize_module(&ctx,
> r600_extra_shader_checks(&sscreen->b, PIPE_SHADER_VERTEX));
>
> if (si_compile_llvm(sscreen, &out->binary, &out->config, tm,
> gallivm->module, debug, ctx.type,
> "Vertex Shader Epilog"))
> status = false;
>
> - radeon_llvm_dispose(&ctx);
> + si_llvm_dispose(&ctx);
> return status;
> }
>
> /**
> * Create & compile a vertex shader epilog. This a helper used by VS and TES.
> */
> static bool si_get_vs_epilog(struct si_screen *sscreen,
> LLVMTargetMachineRef tm,
> struct si_shader *shader,
> struct pipe_debug_callback *debug,
> @@ -7131,29 +7131,29 @@ static bool si_compile_tcs_epilog(struct si_screen *sscreen,
> declare_tess_lds(&ctx);
> func = ctx.main_fn;
>
> si_write_tess_factors(bld_base,
> LLVMGetParam(func, last_sgpr + 1),
> LLVMGetParam(func, last_sgpr + 2),
> LLVMGetParam(func, last_sgpr + 3));
>
> /* Compile. */
> LLVMBuildRetVoid(gallivm->builder);
> - radeon_llvm_finalize_module(&ctx,
> + si_llvm_finalize_module(&ctx,
> r600_extra_shader_checks(&sscreen->b, PIPE_SHADER_TESS_CTRL));
>
> if (si_compile_llvm(sscreen, &out->binary, &out->config, tm,
> gallivm->module, debug, ctx.type,
> "Tessellation Control Shader Epilog"))
> status = false;
>
> - radeon_llvm_dispose(&ctx);
> + si_llvm_dispose(&ctx);
> return status;
> }
>
> /**
> * Select and compile (or reuse) TCS parts (epilog).
> */
> static bool si_shader_select_tcs_parts(struct si_screen *sscreen,
> LLVMTargetMachineRef tm,
> struct si_shader *shader,
> struct pipe_debug_callback *debug)
> @@ -7416,29 +7416,29 @@ static bool si_compile_ps_prolog(struct si_screen *sscreen,
> }
>
> /* Tell LLVM to insert WQM instruction sequence when needed. */
> if (key->ps_prolog.wqm) {
> LLVMAddTargetDependentFunctionAttr(func,
> "amdgpu-ps-wqm-outputs", "");
> }
>
> /* Compile. */
> si_llvm_build_ret(&ctx, ret);
> - radeon_llvm_finalize_module(&ctx,
> + si_llvm_finalize_module(&ctx,
> r600_extra_shader_checks(&sscreen->b, PIPE_SHADER_FRAGMENT));
>
> if (si_compile_llvm(sscreen, &out->binary, &out->config, tm,
> gallivm->module, debug, ctx.type,
> "Fragment Shader Prolog"))
> status = false;
>
> - radeon_llvm_dispose(&ctx);
> + si_llvm_dispose(&ctx);
> return status;
> }
>
> /**
> * Compile the pixel shader epilog. This handles everything that must be
> * emulated for pixel shader exports. (alpha-test, format conversions, etc)
> */
> static bool si_compile_ps_epilog(struct si_screen *sscreen,
> LLVMTargetMachineRef tm,
> struct pipe_debug_callback *debug,
> @@ -7479,21 +7479,21 @@ static bool si_compile_ps_epilog(struct si_screen *sscreen,
> last_sgpr + 1 + PS_EPILOG_SAMPLEMASK_MIN_LOC + 1);
>
> assert(num_params <= ARRAY_SIZE(params));
>
> for (i = last_sgpr + 1; i < num_params; i++)
> params[i] = ctx.f32;
>
> /* Create the function. */
> si_create_function(&ctx, NULL, 0, params, num_params, last_sgpr);
> /* Disable elimination of unused inputs. */
> - radeon_llvm_add_attribute(ctx.main_fn,
> + si_llvm_add_attribute(ctx.main_fn,
> "InitialPSInputAddr", 0xffffff);
>
> /* Process colors. */
> unsigned vgpr = last_sgpr + 1;
> unsigned colors_written = key->ps_epilog.colors_written;
> int last_color_export = -1;
>
> /* Find the last color export. */
> if (!key->ps_epilog.writes_z &&
> !key->ps_epilog.writes_stencil &&
> @@ -7537,29 +7537,29 @@ static bool si_compile_ps_epilog(struct si_screen *sscreen,
> if (depth || stencil || samplemask)
> si_export_mrt_z(bld_base, depth, stencil, samplemask, &exp);
> else if (last_color_export == -1)
> si_export_null(bld_base);
>
> if (exp.num)
> si_emit_ps_exports(&ctx, &exp);
>
> /* Compile. */
> LLVMBuildRetVoid(gallivm->builder);
> - radeon_llvm_finalize_module(&ctx,
> + si_llvm_finalize_module(&ctx,
> r600_extra_shader_checks(&sscreen->b, PIPE_SHADER_FRAGMENT));
>
> if (si_compile_llvm(sscreen, &out->binary, &out->config, tm,
> gallivm->module, debug, ctx.type,
> "Fragment Shader Epilog"))
> status = false;
>
> - radeon_llvm_dispose(&ctx);
> + si_llvm_dispose(&ctx);
> return status;
> }
>
> /**
> * Select and compile (or reuse) pixel shader parts (prolog & epilog).
> */
> static bool si_shader_select_ps_parts(struct si_screen *sscreen,
> LLVMTargetMachineRef tm,
> struct si_shader *shader,
> struct pipe_debug_callback *debug)
> diff --git a/src/gallium/drivers/radeonsi/si_shader_internal.h b/src/gallium/drivers/radeonsi/si_shader_internal.h
> index 37001c0..84d8ed5 100644
> --- a/src/gallium/drivers/radeonsi/si_shader_internal.h
> +++ b/src/gallium/drivers/radeonsi/si_shader_internal.h
> @@ -36,21 +36,21 @@ struct pipe_debug_callback;
> struct radeon_shader_binary;
>
> #define RADEON_LLVM_MAX_INPUT_SLOTS 32
> #define RADEON_LLVM_MAX_INPUTS 32 * 4
> #define RADEON_LLVM_MAX_OUTPUTS 32 * 4
>
> #define RADEON_LLVM_INITIAL_CF_DEPTH 4
>
> #define RADEON_LLVM_MAX_SYSTEM_VALUES 4
>
> -struct radeon_llvm_flow;
> +struct si_llvm_flow;
>
> struct si_shader_context {
> struct lp_build_tgsi_soa_context soa;
> struct gallivm_state gallivm;
> struct si_shader *shader;
> struct si_screen *screen;
>
> unsigned type; /* PIPE_SHADER_* specifies the type of shader. */
> bool is_gs_copy_shader;
> /* Whether to generate the optimized shader variant compiled as a whole
> @@ -82,21 +82,21 @@ struct si_shader_context {
> LLVMValueRef outputs[RADEON_LLVM_MAX_OUTPUTS][TGSI_NUM_CHANNELS];
>
> /** This pointer is used to contain the temporary values.
> * The amount of temporary used in tgsi can't be bound to a max value and
> * thus we must allocate this array at runtime.
> */
> LLVMValueRef *temps;
> unsigned temps_count;
> LLVMValueRef system_values[RADEON_LLVM_MAX_SYSTEM_VALUES];
>
> - struct radeon_llvm_flow *flow;
> + struct si_llvm_flow *flow;
> unsigned flow_depth;
> unsigned flow_depth_max;
>
> struct tgsi_array_info *temp_arrays;
> LLVMValueRef *temp_array_allocas;
>
> LLVMValueRef undef_alloca;
>
> LLVMValueRef main_fn;
> LLVMTypeRef return_type;
> @@ -153,68 +153,68 @@ struct si_shader_context {
>
> LLVMValueRef shared_memory;
> };
>
> static inline struct si_shader_context *
> si_shader_context(struct lp_build_tgsi_context *bld_base)
> {
> return (struct si_shader_context*)bld_base;
> }
>
> -void radeon_llvm_add_attribute(LLVMValueRef F, const char *name, int value);
> -void radeon_llvm_shader_type(LLVMValueRef F, unsigned type);
> +void si_llvm_add_attribute(LLVMValueRef F, const char *name, int value);
> +void si_llvm_shader_type(LLVMValueRef F, unsigned type);
>
> -LLVMTargetRef radeon_llvm_get_r600_target(const char *triple);
> +LLVMTargetRef si_llvm_get_amdgpu_target(const char *triple);
>
> -unsigned radeon_llvm_compile(LLVMModuleRef M, struct radeon_shader_binary *binary,
> - LLVMTargetMachineRef tm,
> - struct pipe_debug_callback *debug);
> +unsigned si_llvm_compile(LLVMModuleRef M, struct radeon_shader_binary *binary,
> + LLVMTargetMachineRef tm,
> + struct pipe_debug_callback *debug);
>
> LLVMTypeRef tgsi2llvmtype(struct lp_build_tgsi_context *bld_base,
> enum tgsi_opcode_type type);
>
> LLVMValueRef bitcast(struct lp_build_tgsi_context *bld_base,
> enum tgsi_opcode_type type, LLVMValueRef value);
>
> -LLVMValueRef radeon_llvm_bound_index(struct si_shader_context *ctx,
> - LLVMValueRef index,
> - unsigned num);
> +LLVMValueRef si_llvm_bound_index(struct si_shader_context *ctx,
> + LLVMValueRef index,
> + unsigned num);
>
> -void radeon_llvm_context_init(struct si_shader_context *ctx,
> - const char *triple,
> - const struct tgsi_shader_info *info,
> - const struct tgsi_token *tokens);
> +void si_llvm_context_init(struct si_shader_context *ctx,
> + const char *triple,
> + const struct tgsi_shader_info *info,
> + const struct tgsi_token *tokens);
>
> -void radeon_llvm_create_func(struct si_shader_context *ctx,
> - LLVMTypeRef *return_types, unsigned num_return_elems,
> - LLVMTypeRef *ParamTypes, unsigned ParamCount);
> +void si_llvm_create_func(struct si_shader_context *ctx,
> + LLVMTypeRef *return_types, unsigned num_return_elems,
> + LLVMTypeRef *ParamTypes, unsigned ParamCount);
>
> -void radeon_llvm_dispose(struct si_shader_context *ctx);
> +void si_llvm_dispose(struct si_shader_context *ctx);
>
> -void radeon_llvm_finalize_module(struct si_shader_context *ctx,
> - bool run_verifier);
> +void si_llvm_finalize_module(struct si_shader_context *ctx,
> + bool run_verifier);
>
> -LLVMValueRef radeon_llvm_emit_fetch_64bit(struct lp_build_tgsi_context *bld_base,
> - enum tgsi_opcode_type type,
> - LLVMValueRef ptr,
> - LLVMValueRef ptr2);
> +LLVMValueRef si_llvm_emit_fetch_64bit(struct lp_build_tgsi_context *bld_base,
> + enum tgsi_opcode_type type,
> + LLVMValueRef ptr,
> + LLVMValueRef ptr2);
>
> -LLVMValueRef radeon_llvm_saturate(struct lp_build_tgsi_context *bld_base,
> - LLVMValueRef value);
> +LLVMValueRef si_llvm_saturate(struct lp_build_tgsi_context *bld_base,
> + LLVMValueRef value);
>
> -LLVMValueRef radeon_llvm_emit_fetch(struct lp_build_tgsi_context *bld_base,
> - const struct tgsi_full_src_register *reg,
> - enum tgsi_opcode_type type,
> - unsigned swizzle);
> +LLVMValueRef si_llvm_emit_fetch(struct lp_build_tgsi_context *bld_base,
> + const struct tgsi_full_src_register *reg,
> + enum tgsi_opcode_type type,
> + unsigned swizzle);
>
> -void radeon_llvm_emit_store(struct lp_build_tgsi_context *bld_base,
> - const struct tgsi_full_instruction *inst,
> - const struct tgsi_opcode_info *info,
> - LLVMValueRef dst[4]);
> +void si_llvm_emit_store(struct lp_build_tgsi_context *bld_base,
> + const struct tgsi_full_instruction *inst,
> + const struct tgsi_opcode_info *info,
> + LLVMValueRef dst[4]);
>
> void si_shader_context_init_alu(struct lp_build_tgsi_context *bld_base);
> void si_prepare_cube_coords(struct lp_build_tgsi_context *bld_base,
> struct lp_build_emit_data *emit_data,
> LLVMValueRef *coords_arg,
> LLVMValueRef *derivs_arg);
>
> #endif
> diff --git a/src/gallium/drivers/radeonsi/si_shader_tgsi_setup.c b/src/gallium/drivers/radeonsi/si_shader_tgsi_setup.c
> index b0174b1..c7a08cc 100644
> --- a/src/gallium/drivers/radeonsi/si_shader_tgsi_setup.c
> +++ b/src/gallium/drivers/radeonsi/si_shader_tgsi_setup.c
> @@ -35,64 +35,64 @@
> #include "tgsi/tgsi_parse.h"
> #include "util/u_math.h"
> #include "util/u_memory.h"
> #include "util/u_debug.h"
>
> #include <stdio.h>
> #include <llvm-c/Transforms/Scalar.h>
>
> /* Data for if/else/endif and bgnloop/endloop control flow structures.
> */
> -struct radeon_llvm_flow {
> +struct si_llvm_flow {
> /* Loop exit or next part of if/else/endif. */
> LLVMBasicBlockRef next_block;
> LLVMBasicBlockRef loop_entry_block;
> };
>
> #define CPU_STRING_LEN 30
> #define FS_STRING_LEN 30
> #define TRIPLE_STRING_LEN 7
>
> /**
> * Shader types for the LLVM backend.
> */
> -enum radeon_llvm_shader_type {
> +enum si_llvm_shader_type {
> RADEON_LLVM_SHADER_PS = 0,
> RADEON_LLVM_SHADER_VS = 1,
> RADEON_LLVM_SHADER_GS = 2,
> RADEON_LLVM_SHADER_CS = 3,
> };
>
> -enum radeon_llvm_calling_convention {
> +enum si_llvm_calling_convention {
> RADEON_LLVM_AMDGPU_VS = 87,
> RADEON_LLVM_AMDGPU_GS = 88,
> RADEON_LLVM_AMDGPU_PS = 89,
> RADEON_LLVM_AMDGPU_CS = 90,
> };
>
> -void radeon_llvm_add_attribute(LLVMValueRef F, const char *name, int value)
> +void si_llvm_add_attribute(LLVMValueRef F, const char *name, int value)
> {
> char str[16];
>
> snprintf(str, sizeof(str), "%i", value);
> LLVMAddTargetDependentFunctionAttr(F, name, str);
> }
>
> /**
> * Set the shader type we want to compile
> *
> * @param type shader type to set
> */
> -void radeon_llvm_shader_type(LLVMValueRef F, unsigned type)
> +void si_llvm_shader_type(LLVMValueRef F, unsigned type)
> {
> - enum radeon_llvm_shader_type llvm_type;
> - enum radeon_llvm_calling_convention calling_conv;
> + enum si_llvm_shader_type llvm_type;
> + enum si_llvm_calling_convention calling_conv;
>
> switch (type) {
> case PIPE_SHADER_VERTEX:
> case PIPE_SHADER_TESS_CTRL:
> case PIPE_SHADER_TESS_EVAL:
> llvm_type = RADEON_LLVM_SHADER_VS;
> calling_conv = RADEON_LLVM_AMDGPU_VS;
> break;
> case PIPE_SHADER_GEOMETRY:
> llvm_type = RADEON_LLVM_SHADER_GS;
> @@ -106,68 +106,68 @@ void radeon_llvm_shader_type(LLVMValueRef F, unsigned type)
> llvm_type = RADEON_LLVM_SHADER_CS;
> calling_conv = RADEON_LLVM_AMDGPU_CS;
> break;
> default:
> unreachable("Unhandle shader type");
> }
>
> if (HAVE_LLVM >= 0x309)
> LLVMSetFunctionCallConv(F, calling_conv);
> else
> - radeon_llvm_add_attribute(F, "ShaderType", llvm_type);
> + si_llvm_add_attribute(F, "ShaderType", llvm_type);
> }
>
> -static void init_r600_target()
> +static void init_amdgpu_target()
> {
> gallivm_init_llvm_targets();
> #if HAVE_LLVM < 0x0307
> LLVMInitializeR600TargetInfo();
> LLVMInitializeR600Target();
> LLVMInitializeR600TargetMC();
> LLVMInitializeR600AsmPrinter();
> #else
> LLVMInitializeAMDGPUTargetInfo();
> LLVMInitializeAMDGPUTarget();
> LLVMInitializeAMDGPUTargetMC();
> LLVMInitializeAMDGPUAsmPrinter();
>
> #endif
> }
>
> -static once_flag init_r600_target_once_flag = ONCE_FLAG_INIT;
> +static once_flag init_amdgpu_target_once_flag = ONCE_FLAG_INIT;
>
> -LLVMTargetRef radeon_llvm_get_r600_target(const char *triple)
> +LLVMTargetRef si_llvm_get_amdgpu_target(const char *triple)
> {
> LLVMTargetRef target = NULL;
> char *err_message = NULL;
>
> - call_once(&init_r600_target_once_flag, init_r600_target);
> + call_once(&init_amdgpu_target_once_flag, init_amdgpu_target);
>
> if (LLVMGetTargetFromTriple(triple, &target, &err_message)) {
> fprintf(stderr, "Cannot find target for triple %s ", triple);
> if (err_message) {
> fprintf(stderr, "%s\n", err_message);
> }
> LLVMDisposeMessage(err_message);
> return NULL;
> }
> return target;
> }
>
> -struct radeon_llvm_diagnostics {
> +struct si_llvm_diagnostics {
> struct pipe_debug_callback *debug;
> unsigned retval;
> };
>
> -static void radeonDiagnosticHandler(LLVMDiagnosticInfoRef di, void *context)
> +static void si_diagnostic_handler(LLVMDiagnosticInfoRef di, void *context)
> {
> - struct radeon_llvm_diagnostics *diag = (struct radeon_llvm_diagnostics *)context;
> + struct si_llvm_diagnostics *diag = (struct si_llvm_diagnostics *)context;
> LLVMDiagnosticSeverity severity = LLVMGetDiagInfoSeverity(di);
> char *description = LLVMGetDiagInfoDescription(di);
> const char *severity_str = NULL;
>
> switch (severity) {
> case LLVMDSError:
> severity_str = "error";
> break;
> case LLVMDSWarning:
> severity_str = "warning";
> @@ -191,39 +191,39 @@ static void radeonDiagnosticHandler(LLVMDiagnosticInfoRef di, void *context)
> }
>
> LLVMDisposeMessage(description);
> }
>
> /**
> * Compile an LLVM module to machine code.
> *
> * @returns 0 for success, 1 for failure
> */
> -unsigned radeon_llvm_compile(LLVMModuleRef M, struct radeon_shader_binary *binary,
> - LLVMTargetMachineRef tm,
> - struct pipe_debug_callback *debug)
> +unsigned si_llvm_compile(LLVMModuleRef M, struct radeon_shader_binary *binary,
> + LLVMTargetMachineRef tm,
> + struct pipe_debug_callback *debug)
> {
> - struct radeon_llvm_diagnostics diag;
> + struct si_llvm_diagnostics diag;
> char *err;
> LLVMContextRef llvm_ctx;
> LLVMMemoryBufferRef out_buffer;
> unsigned buffer_size;
> const char *buffer_data;
> LLVMBool mem_err;
>
> diag.debug = debug;
> diag.retval = 0;
>
> /* Setup Diagnostic Handler*/
> llvm_ctx = LLVMGetModuleContext(M);
>
> - LLVMContextSetDiagnosticHandler(llvm_ctx, radeonDiagnosticHandler, &diag);
> + LLVMContextSetDiagnosticHandler(llvm_ctx, si_diagnostic_handler, &diag);
>
> /* Compile IR*/
> mem_err = LLVMTargetMachineEmitToMemoryBuffer(tm, M, LLVMObjectFile, &err,
> &out_buffer);
>
> /* Process Errors/Warnings */
> if (mem_err) {
> fprintf(stderr, "%s: %s", __FUNCTION__, err);
> pipe_debug_message(debug, SHADER_INFO,
> "LLVM emit error: %s", err);
> @@ -278,23 +278,23 @@ LLVMValueRef bitcast(struct lp_build_tgsi_context *bld_base,
> if (dst_type)
> return LLVMBuildBitCast(builder, value, dst_type, "");
> else
> return value;
> }
>
> /**
> * Return a value that is equal to the given i32 \p index if it lies in [0,num)
> * or an undefined value in the same interval otherwise.
> */
> -LLVMValueRef radeon_llvm_bound_index(struct si_shader_context *ctx,
> - LLVMValueRef index,
> - unsigned num)
> +LLVMValueRef si_llvm_bound_index(struct si_shader_context *ctx,
> + LLVMValueRef index,
> + unsigned num)
> {
> struct gallivm_state *gallivm = &ctx->gallivm;
> LLVMBuilderRef builder = gallivm->builder;
> LLVMValueRef c_max = lp_build_const_int32(gallivm, num - 1);
> LLVMValueRef cc;
>
> if (util_is_power_of_two(num)) {
> index = LLVMBuildAnd(builder, index, c_max, "");
> } else {
> /* In theory, this MAX pattern should result in code that is
> @@ -303,42 +303,42 @@ LLVMValueRef radeon_llvm_bound_index(struct si_shader_context *ctx,
> * In practice, LLVM generates worse code (at the time of
> * writing), because its value tracking is not strong enough.
> */
> cc = LLVMBuildICmp(builder, LLVMIntULE, index, c_max, "");
> index = LLVMBuildSelect(builder, cc, index, c_max, "");
> }
>
> return index;
> }
>
> -static struct radeon_llvm_flow *
> +static struct si_llvm_flow *
> get_current_flow(struct si_shader_context *ctx)
> {
> if (ctx->flow_depth > 0)
> return &ctx->flow[ctx->flow_depth - 1];
> return NULL;
> }
>
> -static struct radeon_llvm_flow *
> +static struct si_llvm_flow *
> get_innermost_loop(struct si_shader_context *ctx)
> {
> for (unsigned i = ctx->flow_depth; i > 0; --i) {
> if (ctx->flow[i - 1].loop_entry_block)
> return &ctx->flow[i - 1];
> }
> return NULL;
> }
>
> -static struct radeon_llvm_flow *
> +static struct si_llvm_flow *
> push_flow(struct si_shader_context *ctx)
> {
> - struct radeon_llvm_flow *flow;
> + struct si_llvm_flow *flow;
>
> if (ctx->flow_depth >= ctx->flow_depth_max) {
> unsigned new_max = MAX2(ctx->flow_depth << 1, RADEON_LLVM_INITIAL_CF_DEPTH);
> ctx->flow = REALLOC(ctx->flow,
> ctx->flow_depth_max * sizeof(*ctx->flow),
> new_max * sizeof(*ctx->flow));
> ctx->flow_depth_max = new_max;
> }
>
> flow = &ctx->flow[ctx->flow_depth];
> @@ -476,42 +476,42 @@ get_pointer_into_array(struct si_shader_context *ctx,
> * descriptors).
> *
> * TODO It should be possible to avoid the additional instructions
> * if LLVM is changed so that it guarantuees:
> * 1. the scratch space descriptor isolates the current wave (this
> * could even save the scratch offset SGPR at the cost of an
> * additional SALU instruction)
> * 2. the memory for allocas must be allocated at the _end_ of the
> * scratch space (after spilled registers)
> */
> - index = radeon_llvm_bound_index(ctx, index, array->range.Last - array->range.First + 1);
> + index = si_llvm_bound_index(ctx, index, array->range.Last - array->range.First + 1);
>
> index = LLVMBuildMul(
> builder, index,
> lp_build_const_int32(gallivm, util_bitcount(array->writemask)),
> "");
> index = LLVMBuildAdd(
> builder, index,
> lp_build_const_int32(
> gallivm,
> util_bitcount(array->writemask & ((1 << swizzle) - 1))),
> "");
> idxs[0] = ctx->soa.bld_base.uint_bld.zero;
> idxs[1] = index;
> return LLVMBuildGEP(builder, alloca, idxs, 2, "");
> }
>
> LLVMValueRef
> -radeon_llvm_emit_fetch_64bit(struct lp_build_tgsi_context *bld_base,
> - enum tgsi_opcode_type type,
> - LLVMValueRef ptr,
> - LLVMValueRef ptr2)
> +si_llvm_emit_fetch_64bit(struct lp_build_tgsi_context *bld_base,
> + enum tgsi_opcode_type type,
> + LLVMValueRef ptr,
> + LLVMValueRef ptr2)
> {
> LLVMBuilderRef builder = bld_base->base.gallivm->builder;
> LLVMValueRef result;
>
> result = LLVMGetUndef(LLVMVectorType(LLVMIntTypeInContext(bld_base->base.gallivm->context, 32), bld_base->base.type.length * 2));
>
> result = LLVMBuildInsertElement(builder,
> result,
> bitcast(bld_base, TGSI_TYPE_UNSIGNED, ptr),
> bld_base->int_bld.zero, "");
> @@ -534,21 +534,21 @@ emit_array_fetch(struct lp_build_tgsi_context *bld_base,
>
> unsigned i, size = range.Last - range.First + 1;
> LLVMTypeRef vec = LLVMVectorType(tgsi2llvmtype(bld_base, type), size);
> LLVMValueRef result = LLVMGetUndef(vec);
>
> struct tgsi_full_src_register tmp_reg = {};
> tmp_reg.Register.File = File;
>
> for (i = 0; i < size; ++i) {
> tmp_reg.Register.Index = i + range.First;
> - LLVMValueRef temp = radeon_llvm_emit_fetch(bld_base, &tmp_reg, type, swizzle);
> + LLVMValueRef temp = si_llvm_emit_fetch(bld_base, &tmp_reg, type, swizzle);
> result = LLVMBuildInsertElement(builder, result, temp,
> lp_build_const_int32(gallivm, i), "array_vector");
> }
> return result;
> }
>
> static LLVMValueRef
> load_value_from_array(struct lp_build_tgsi_context *bld_base,
> unsigned file,
> enum tgsi_opcode_type type,
> @@ -562,21 +562,21 @@ load_value_from_array(struct lp_build_tgsi_context *bld_base,
> LLVMBuilderRef builder = gallivm->builder;
> LLVMValueRef ptr;
>
> ptr = get_pointer_into_array(ctx, file, swizzle, reg_index, reg_indirect);
> if (ptr) {
> LLVMValueRef val = LLVMBuildLoad(builder, ptr, "");
> if (tgsi_type_is_64bit(type)) {
> LLVMValueRef ptr_hi, val_hi;
> ptr_hi = LLVMBuildGEP(builder, ptr, &bld_base->uint_bld.one, 1, "");
> val_hi = LLVMBuildLoad(builder, ptr_hi, "");
> - val = radeon_llvm_emit_fetch_64bit(bld_base, type, val, val_hi);
> + val = si_llvm_emit_fetch_64bit(bld_base, type, val, val_hi);
> }
>
> return val;
> } else {
> struct tgsi_declaration_range range =
> get_array_range(bld_base, file, reg_index, reg_indirect);
> LLVMValueRef index =
> emit_array_index(bld, reg_indirect, reg_index - range.First);
> LLVMValueRef array =
> emit_array_fetch(bld_base, file, type, range, swizzle);
> @@ -627,35 +627,35 @@ store_value_to_array(struct lp_build_tgsi_context *bld_base,
> default:
> continue;
> }
> value = LLVMBuildExtractElement(builder, array,
> lp_build_const_int32(gallivm, i), "");
> LLVMBuildStore(builder, value, temp_ptr);
> }
> }
> }
>
> -LLVMValueRef radeon_llvm_emit_fetch(struct lp_build_tgsi_context *bld_base,
> - const struct tgsi_full_src_register *reg,
> - enum tgsi_opcode_type type,
> - unsigned swizzle)
> +LLVMValueRef si_llvm_emit_fetch(struct lp_build_tgsi_context *bld_base,
> + const struct tgsi_full_src_register *reg,
> + enum tgsi_opcode_type type,
> + unsigned swizzle)
> {
> struct si_shader_context *ctx = si_shader_context(bld_base);
> struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
> LLVMBuilderRef builder = bld_base->base.gallivm->builder;
> LLVMValueRef result = NULL, ptr, ptr2;
>
> if (swizzle == ~0) {
> LLVMValueRef values[TGSI_NUM_CHANNELS];
> unsigned chan;
> for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) {
> - values[chan] = radeon_llvm_emit_fetch(bld_base, reg, type, chan);
> + values[chan] = si_llvm_emit_fetch(bld_base, reg, type, chan);
> }
> return lp_build_gather_values(bld_base->base.gallivm, values,
> TGSI_NUM_CHANNELS);
> }
>
> if (reg->Register.Indirect) {
> LLVMValueRef load = load_value_from_array(bld_base, reg->Register.File, type,
> 							  swizzle, reg->Register.Index, &reg->Indirect);
> return bitcast(bld_base, type, load);
> }
> @@ -689,45 +689,45 @@ LLVMValueRef radeon_llvm_emit_fetch(struct lp_build_tgsi_context *bld_base,
> if (ctx->soa.bld_base.info->processor == PIPE_SHADER_FRAGMENT)
> ctx->load_input(ctx, index, &ctx->input_decls[index], input);
> else
> memcpy(input, &ctx->inputs[index * 4], sizeof(input));
>
> result = input[swizzle];
>
> if (tgsi_type_is_64bit(type)) {
> ptr = result;
> ptr2 = input[swizzle + 1];
> - return radeon_llvm_emit_fetch_64bit(bld_base, type, ptr, ptr2);
> + return si_llvm_emit_fetch_64bit(bld_base, type, ptr, ptr2);
> }
> break;
> }
>
> case TGSI_FILE_TEMPORARY:
> if (reg->Register.Index >= ctx->temps_count)
> return LLVMGetUndef(tgsi2llvmtype(bld_base, type));
> ptr = ctx->temps[reg->Register.Index * TGSI_NUM_CHANNELS + swizzle];
> if (tgsi_type_is_64bit(type)) {
> ptr2 = ctx->temps[reg->Register.Index * TGSI_NUM_CHANNELS + swizzle + 1];
> - return radeon_llvm_emit_fetch_64bit(bld_base, type,
> - LLVMBuildLoad(builder, ptr, ""),
> - LLVMBuildLoad(builder, ptr2, ""));
> + return si_llvm_emit_fetch_64bit(bld_base, type,
> + LLVMBuildLoad(builder, ptr, ""),
> + LLVMBuildLoad(builder, ptr2, ""));
> }
> result = LLVMBuildLoad(builder, ptr, "");
> break;
>
> case TGSI_FILE_OUTPUT:
> ptr = lp_get_output_ptr(bld, reg->Register.Index, swizzle);
> if (tgsi_type_is_64bit(type)) {
> ptr2 = lp_get_output_ptr(bld, reg->Register.Index, swizzle + 1);
> - return radeon_llvm_emit_fetch_64bit(bld_base, type,
> - LLVMBuildLoad(builder, ptr, ""),
> - LLVMBuildLoad(builder, ptr2, ""));
> + return si_llvm_emit_fetch_64bit(bld_base, type,
> + LLVMBuildLoad(builder, ptr, ""),
> + LLVMBuildLoad(builder, ptr2, ""));
> }
> result = LLVMBuildLoad(builder, ptr, "");
> break;
>
> default:
> return LLVMGetUndef(tgsi2llvmtype(bld_base, type));
> }
>
> return bitcast(bld_base, type, result);
> }
> @@ -917,39 +917,39 @@ static void emit_declaration(struct lp_build_tgsi_context *bld_base,
>
> case TGSI_FILE_MEMORY:
> ctx->declare_memory_region(ctx, decl);
> break;
>
> default:
> break;
> }
> }
>
> -LLVMValueRef radeon_llvm_saturate(struct lp_build_tgsi_context *bld_base,
> - LLVMValueRef value)
> +LLVMValueRef si_llvm_saturate(struct lp_build_tgsi_context *bld_base,
> + LLVMValueRef value)
> {
> struct lp_build_emit_data clamp_emit_data;
>
> memset(&clamp_emit_data, 0, sizeof(clamp_emit_data));
> clamp_emit_data.arg_count = 3;
> clamp_emit_data.args[0] = value;
> clamp_emit_data.args[2] = bld_base->base.one;
> clamp_emit_data.args[1] = bld_base->base.zero;
>
> return lp_build_emit_llvm(bld_base, TGSI_OPCODE_CLAMP,
> &clamp_emit_data);
> }
>
> -void radeon_llvm_emit_store(struct lp_build_tgsi_context *bld_base,
> - const struct tgsi_full_instruction *inst,
> - const struct tgsi_opcode_info *info,
> - LLVMValueRef dst[4])
> +void si_llvm_emit_store(struct lp_build_tgsi_context *bld_base,
> + const struct tgsi_full_instruction *inst,
> + const struct tgsi_opcode_info *info,
> + LLVMValueRef dst[4])
> {
> struct si_shader_context *ctx = si_shader_context(bld_base);
> struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
> struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
> const struct tgsi_full_dst_register *reg = &inst->Dst[0];
> LLVMBuilderRef builder = bld->bld_base.base.gallivm->builder;
> LLVMValueRef temp_ptr, temp_ptr2 = NULL;
> unsigned chan, chan_index;
> bool is_vec_store = false;
> enum tgsi_opcode_type dtype = tgsi_opcode_infer_dst_type(inst->Instruction.Opcode);
> @@ -969,21 +969,21 @@ void radeon_llvm_emit_store(struct lp_build_tgsi_context *bld_base,
> bld_base->emit_store(bld_base, inst, info, values);
> return;
> }
>
> TGSI_FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
> LLVMValueRef value = dst[chan_index];
>
> if (tgsi_type_is_64bit(dtype) && (chan_index == 1 || chan_index == 3))
> continue;
> if (inst->Instruction.Saturate)
> - value = radeon_llvm_saturate(bld_base, value);
> + value = si_llvm_saturate(bld_base, value);
>
> if (reg->Register.File == TGSI_FILE_ADDRESS) {
> temp_ptr = bld->addr[reg->Register.Index][chan_index];
> LLVMBuildStore(builder, value, temp_ptr);
> continue;
> }
>
> if (!tgsi_type_is_64bit(dtype))
> value = bitcast(bld_base, TGSI_TYPE_FLOAT, value);
>
> @@ -1046,21 +1046,21 @@ static void set_basicblock_name(LLVMBasicBlockRef bb, const char *base, int pc)
> /* Append a basic block at the level of the parent flow.
> */
> static LLVMBasicBlockRef append_basic_block(struct si_shader_context *ctx,
> const char *name)
> {
> struct gallivm_state *gallivm = &ctx->gallivm;
>
> assert(ctx->flow_depth >= 1);
>
> if (ctx->flow_depth >= 2) {
> - struct radeon_llvm_flow *flow = &ctx->flow[ctx->flow_depth - 2];
> + struct si_llvm_flow *flow = &ctx->flow[ctx->flow_depth - 2];
>
> return LLVMInsertBasicBlockInContext(gallivm->context,
> flow->next_block, name);
> }
>
> return LLVMAppendBasicBlockInContext(gallivm->context, ctx->main_fn, name);
> }
>
> /* Emit a branch to the given default target for the current block if
> * applicable -- that is, if the current block does not already contain a
> @@ -1071,112 +1071,112 @@ static void emit_default_branch(LLVMBuilderRef builder, LLVMBasicBlockRef target
> if (!LLVMGetBasicBlockTerminator(LLVMGetInsertBlock(builder)))
> LLVMBuildBr(builder, target);
> }
>
> static void bgnloop_emit(const struct lp_build_tgsi_action *action,
> struct lp_build_tgsi_context *bld_base,
> struct lp_build_emit_data *emit_data)
> {
> struct si_shader_context *ctx = si_shader_context(bld_base);
> struct gallivm_state *gallivm = bld_base->base.gallivm;
> - struct radeon_llvm_flow *flow = push_flow(ctx);
> + struct si_llvm_flow *flow = push_flow(ctx);
> flow->loop_entry_block = append_basic_block(ctx, "LOOP");
> flow->next_block = append_basic_block(ctx, "ENDLOOP");
> set_basicblock_name(flow->loop_entry_block, "loop", bld_base->pc);
> LLVMBuildBr(gallivm->builder, flow->loop_entry_block);
> LLVMPositionBuilderAtEnd(gallivm->builder, flow->loop_entry_block);
> }
>
> static void brk_emit(const struct lp_build_tgsi_action *action,
> struct lp_build_tgsi_context *bld_base,
> struct lp_build_emit_data *emit_data)
> {
> struct si_shader_context *ctx = si_shader_context(bld_base);
> struct gallivm_state *gallivm = bld_base->base.gallivm;
> - struct radeon_llvm_flow *flow = get_innermost_loop(ctx);
> + struct si_llvm_flow *flow = get_innermost_loop(ctx);
>
> LLVMBuildBr(gallivm->builder, flow->next_block);
> }
>
> static void cont_emit(const struct lp_build_tgsi_action *action,
> struct lp_build_tgsi_context *bld_base,
> struct lp_build_emit_data *emit_data)
> {
> struct si_shader_context *ctx = si_shader_context(bld_base);
> struct gallivm_state *gallivm = bld_base->base.gallivm;
> - struct radeon_llvm_flow *flow = get_innermost_loop(ctx);
> + struct si_llvm_flow *flow = get_innermost_loop(ctx);
>
> LLVMBuildBr(gallivm->builder, flow->loop_entry_block);
> }
>
> static void else_emit(const struct lp_build_tgsi_action *action,
> struct lp_build_tgsi_context *bld_base,
> struct lp_build_emit_data *emit_data)
> {
> struct si_shader_context *ctx = si_shader_context(bld_base);
> struct gallivm_state *gallivm = bld_base->base.gallivm;
> - struct radeon_llvm_flow *current_branch = get_current_flow(ctx);
> + struct si_llvm_flow *current_branch = get_current_flow(ctx);
> LLVMBasicBlockRef endif_block;
>
> assert(!current_branch->loop_entry_block);
>
> endif_block = append_basic_block(ctx, "ENDIF");
> emit_default_branch(gallivm->builder, endif_block);
>
> LLVMPositionBuilderAtEnd(gallivm->builder, current_branch->next_block);
> set_basicblock_name(current_branch->next_block, "else", bld_base->pc);
>
> current_branch->next_block = endif_block;
> }
>
> static void endif_emit(const struct lp_build_tgsi_action *action,
> struct lp_build_tgsi_context *bld_base,
> struct lp_build_emit_data *emit_data)
> {
> struct si_shader_context *ctx = si_shader_context(bld_base);
> struct gallivm_state *gallivm = bld_base->base.gallivm;
> - struct radeon_llvm_flow *current_branch = get_current_flow(ctx);
> + struct si_llvm_flow *current_branch = get_current_flow(ctx);
>
> assert(!current_branch->loop_entry_block);
>
> emit_default_branch(gallivm->builder, current_branch->next_block);
> LLVMPositionBuilderAtEnd(gallivm->builder, current_branch->next_block);
> set_basicblock_name(current_branch->next_block, "endif", bld_base->pc);
>
> ctx->flow_depth--;
> }
>
> static void endloop_emit(const struct lp_build_tgsi_action *action,
> struct lp_build_tgsi_context *bld_base,
> struct lp_build_emit_data *emit_data)
> {
> struct si_shader_context *ctx = si_shader_context(bld_base);
> struct gallivm_state *gallivm = bld_base->base.gallivm;
> - struct radeon_llvm_flow *current_loop = get_current_flow(ctx);
> + struct si_llvm_flow *current_loop = get_current_flow(ctx);
>
> assert(current_loop->loop_entry_block);
>
> emit_default_branch(gallivm->builder, current_loop->loop_entry_block);
>
> LLVMPositionBuilderAtEnd(gallivm->builder, current_loop->next_block);
> set_basicblock_name(current_loop->next_block, "endloop", bld_base->pc);
> ctx->flow_depth--;
> }
>
> static void if_cond_emit(const struct lp_build_tgsi_action *action,
> struct lp_build_tgsi_context *bld_base,
> struct lp_build_emit_data *emit_data,
> LLVMValueRef cond)
> {
> struct si_shader_context *ctx = si_shader_context(bld_base);
> struct gallivm_state *gallivm = bld_base->base.gallivm;
> - struct radeon_llvm_flow *flow = push_flow(ctx);
> + struct si_llvm_flow *flow = push_flow(ctx);
> LLVMBasicBlockRef if_block;
>
> if_block = append_basic_block(ctx, "IF");
> flow->next_block = append_basic_block(ctx, "ELSE");
> set_basicblock_name(if_block, "if", bld_base->pc);
> LLVMBuildCondBr(gallivm->builder, cond, if_block, flow->next_block);
> LLVMPositionBuilderAtEnd(gallivm->builder, if_block);
> }
>
> static void if_emit(const struct lp_build_tgsi_action *action,
> @@ -1214,23 +1214,23 @@ static void emit_immediate(struct lp_build_tgsi_context *bld_base,
> struct si_shader_context *ctx = si_shader_context(bld_base);
>
> for (i = 0; i < 4; ++i) {
> ctx->soa.immediates[ctx->soa.num_immediates][i] =
> LLVMConstInt(bld_base->uint_bld.elem_type, imm->u[i].Uint, false );
> }
>
> ctx->soa.num_immediates++;
> }
>
> -void radeon_llvm_context_init(struct si_shader_context *ctx, const char *triple,
> - const struct tgsi_shader_info *info,
> - const struct tgsi_token *tokens)
> +void si_llvm_context_init(struct si_shader_context *ctx, const char *triple,
> + const struct tgsi_shader_info *info,
> + const struct tgsi_token *tokens)
> {
> struct lp_type type;
>
> /* Initialize the gallivm object:
> * We are only using the module, context, and builder fields of this struct.
> * This should be enough for us to be able to pass our gallivm struct to the
> * helper functions in the gallivm module.
> */
> memset(&ctx->gallivm, 0, sizeof (ctx->gallivm));
> memset(&ctx->soa, 0, sizeof(ctx->soa));
> @@ -1264,29 +1264,29 @@ void radeon_llvm_context_init(struct si_shader_context *ctx, const char *triple,
>
> lp_build_context_init(&bld_base->base, &ctx->gallivm, type);
> lp_build_context_init(&ctx->soa.bld_base.uint_bld, &ctx->gallivm, lp_uint_type(type));
> lp_build_context_init(&ctx->soa.bld_base.int_bld, &ctx->gallivm, lp_int_type(type));
> type.width *= 2;
> lp_build_context_init(&ctx->soa.bld_base.dbl_bld, &ctx->gallivm, type);
> lp_build_context_init(&ctx->soa.bld_base.uint64_bld, &ctx->gallivm, lp_uint_type(type));
> lp_build_context_init(&ctx->soa.bld_base.int64_bld, &ctx->gallivm, lp_int_type(type));
>
> bld_base->soa = 1;
> - bld_base->emit_store = radeon_llvm_emit_store;
> + bld_base->emit_store = si_llvm_emit_store;
> bld_base->emit_swizzle = emit_swizzle;
> bld_base->emit_declaration = emit_declaration;
> bld_base->emit_immediate = emit_immediate;
>
> - bld_base->emit_fetch_funcs[TGSI_FILE_IMMEDIATE] = radeon_llvm_emit_fetch;
> - bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = radeon_llvm_emit_fetch;
> - bld_base->emit_fetch_funcs[TGSI_FILE_TEMPORARY] = radeon_llvm_emit_fetch;
> - bld_base->emit_fetch_funcs[TGSI_FILE_OUTPUT] = radeon_llvm_emit_fetch;
> + bld_base->emit_fetch_funcs[TGSI_FILE_IMMEDIATE] = si_llvm_emit_fetch;
> + bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = si_llvm_emit_fetch;
> + bld_base->emit_fetch_funcs[TGSI_FILE_TEMPORARY] = si_llvm_emit_fetch;
> + bld_base->emit_fetch_funcs[TGSI_FILE_OUTPUT] = si_llvm_emit_fetch;
> bld_base->emit_fetch_funcs[TGSI_FILE_SYSTEM_VALUE] = fetch_system_value;
>
> /* metadata allowing 2.5 ULP */
> ctx->fpmath_md_kind = LLVMGetMDKindIDInContext(ctx->gallivm.context,
> "fpmath", 6);
> LLVMValueRef arg = lp_build_const_float(&ctx->gallivm, 2.5);
> ctx->fpmath_md_2p5_ulp = LLVMMDNodeInContext(ctx->gallivm.context,
> &arg, 1);
>
> /* Allocate outputs */
> @@ -1295,45 +1295,45 @@ void radeon_llvm_context_init(struct si_shader_context *ctx, const char *triple,
> bld_base->op_actions[TGSI_OPCODE_BGNLOOP].emit = bgnloop_emit;
> bld_base->op_actions[TGSI_OPCODE_BRK].emit = brk_emit;
> bld_base->op_actions[TGSI_OPCODE_CONT].emit = cont_emit;
> bld_base->op_actions[TGSI_OPCODE_IF].emit = if_emit;
> bld_base->op_actions[TGSI_OPCODE_UIF].emit = uif_emit;
> bld_base->op_actions[TGSI_OPCODE_ELSE].emit = else_emit;
> bld_base->op_actions[TGSI_OPCODE_ENDIF].emit = endif_emit;
> bld_base->op_actions[TGSI_OPCODE_ENDLOOP].emit = endloop_emit;
> }
>
> -void radeon_llvm_create_func(struct si_shader_context *ctx,
> - LLVMTypeRef *return_types, unsigned num_return_elems,
> - LLVMTypeRef *ParamTypes, unsigned ParamCount)
> +void si_llvm_create_func(struct si_shader_context *ctx,
> + LLVMTypeRef *return_types, unsigned num_return_elems,
> + LLVMTypeRef *ParamTypes, unsigned ParamCount)
> {
> LLVMTypeRef main_fn_type, ret_type;
> LLVMBasicBlockRef main_fn_body;
>
> if (num_return_elems)
> ret_type = LLVMStructTypeInContext(ctx->gallivm.context,
> return_types,
> num_return_elems, true);
> else
> ret_type = LLVMVoidTypeInContext(ctx->gallivm.context);
>
> /* Setup the function */
> ctx->return_type = ret_type;
> main_fn_type = LLVMFunctionType(ret_type, ParamTypes, ParamCount, 0);
> ctx->main_fn = LLVMAddFunction(ctx->gallivm.module, "main", main_fn_type);
> main_fn_body = LLVMAppendBasicBlockInContext(ctx->gallivm.context,
> ctx->main_fn, "main_body");
> LLVMPositionBuilderAtEnd(ctx->gallivm.builder, main_fn_body);
> }
>
> -void radeon_llvm_finalize_module(struct si_shader_context *ctx,
> - bool run_verifier)
> +void si_llvm_finalize_module(struct si_shader_context *ctx,
> + bool run_verifier)
> {
> struct gallivm_state *gallivm = ctx->soa.bld_base.base.gallivm;
> const char *triple = LLVMGetTarget(gallivm->module);
> LLVMTargetLibraryInfoRef target_library_info;
>
> /* Create the pass manager */
> gallivm->passmgr = LLVMCreateFunctionPassManagerForModule(
> gallivm->module);
>
> target_library_info = gallivm_create_target_library_info(triple);
> @@ -1355,21 +1355,21 @@ void radeon_llvm_finalize_module(struct si_shader_context *ctx,
> /* Run the pass */
> LLVMInitializeFunctionPassManager(gallivm->passmgr);
> LLVMRunFunctionPassManager(gallivm->passmgr, ctx->main_fn);
> LLVMFinalizeFunctionPassManager(gallivm->passmgr);
>
> LLVMDisposeBuilder(gallivm->builder);
> LLVMDisposePassManager(gallivm->passmgr);
> gallivm_dispose_target_library_info(target_library_info);
> }
>
> -void radeon_llvm_dispose(struct si_shader_context *ctx)
> +void si_llvm_dispose(struct si_shader_context *ctx)
> {
> LLVMDisposeModule(ctx->soa.bld_base.base.gallivm->module);
> LLVMContextDispose(ctx->soa.bld_base.base.gallivm->context);
> FREE(ctx->temp_arrays);
> ctx->temp_arrays = NULL;
> FREE(ctx->temp_array_allocas);
> ctx->temp_array_allocas = NULL;
> FREE(ctx->temps);
> ctx->temps = NULL;
> ctx->temps_count = 0;
>
More information about the mesa-dev
mailing list