[Mesa-dev] [PATCH v2 18/73] ac/nir: begin splitting off ac_nir_context

Nicolai Hähnle nhaehnle at gmail.com
Wed Jul 5 10:48:02 UTC 2017


From: Nicolai Hähnle <nicolai.haehnle at amd.com>

The eventual goal is to hide all radv-specific details behind
ac_nir_context::abi, so that the NIR->LLVM code can be re-used by
radeonsi.

During development, we live with a partial split in which some of the
NIR->LLVM code still relies on a backlink to the nir_to_llvm_context
(which should ultimately be renamed to reflect that it is radv-specific).
The idea is to remove these backlinks over time.
---
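Note for reviewers: the shape of the split is a pair of linked contexts,
ac_nir_context (driver-independent) and nir_to_llvm_context (radv), tied
together by nir_to_llvm_context::nir and the temporary ac_nir_context::nctx
backlink. Below is a minimal standalone sketch of that ownership pattern.
The member "driver_private", the value of "stage" and the helper
visit_converted() are placeholders for illustration only; the real fields
and visitors are in the diff that follows.

#include <stdio.h>

/* Simplified stand-ins for the two contexts introduced in this patch.
 * Only the nir/nctx links mirror the real code; other members are
 * placeholders. */
struct nir_to_llvm_context;

struct ac_nir_context {
	int stage;                        /* driver-independent state */
	struct nir_to_llvm_context *nctx; /* TODO get rid of this backlink */
};

struct nir_to_llvm_context {
	struct ac_nir_context *nir;       /* forward link, set during setup */
	const char *driver_private;       /* radv-only state */
};

/* Already-converted visitors take ac_nir_context and use its state
 * directly; not-yet-converted radv paths are reached via the backlink. */
static void visit_converted(struct ac_nir_context *ctx)
{
	printf("converted path, stage %d\n", ctx->stage);
	printf("still radv-specific: %s\n", ctx->nctx->driver_private);
}

int main(void)
{
	struct nir_to_llvm_context radv = { .driver_private = "descriptor sets etc." };
	struct ac_nir_context nir = { .stage = 0, .nctx = &radv };

	radv.nir = &nir;
	visit_converted(&nir);
	return 0;
}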
 src/amd/common/ac_nir_to_llvm.c | 449 ++++++++++++++++++++++------------------
 src/amd/common/ac_nir_to_llvm.h |   6 +
 2 files changed, 254 insertions(+), 201 deletions(-)

diff --git a/src/amd/common/ac_nir_to_llvm.c b/src/amd/common/ac_nir_to_llvm.c
index 40d9cc2..1b65368 100644
--- a/src/amd/common/ac_nir_to_llvm.c
+++ b/src/amd/common/ac_nir_to_llvm.c
@@ -47,35 +47,55 @@ enum radeon_llvm_calling_convention {
 #define RADEON_LLVM_MAX_INPUTS (VARYING_SLOT_VAR31 + 1)
 #define RADEON_LLVM_MAX_OUTPUTS (VARYING_SLOT_VAR31 + 1)
 
 enum desc_type {
 	DESC_IMAGE,
 	DESC_FMASK,
 	DESC_SAMPLER,
 	DESC_BUFFER,
 };
 
+struct nir_to_llvm_context;
+
+struct ac_nir_context {
+	struct ac_llvm_context ac;
+	struct ac_shader_abi *abi;
+
+	gl_shader_stage stage;
+
+	struct hash_table *defs;
+	struct hash_table *phis;
+
+	LLVMBasicBlockRef continue_block;
+	LLVMBasicBlockRef break_block;
+
+	LLVMValueRef outputs[RADEON_LLVM_MAX_OUTPUTS * 4];
+
+	int num_locals;
+	LLVMValueRef *locals;
+
+	struct nir_to_llvm_context *nctx; /* TODO get rid of this */
+};
+
 struct nir_to_llvm_context {
 	struct ac_llvm_context ac;
 	const struct ac_nir_compiler_options *options;
 	struct ac_shader_variant_info *shader_info;
 	struct ac_shader_abi abi;
+	struct ac_nir_context *nir;
 
 	unsigned max_workgroup_size;
 	LLVMContextRef context;
 	LLVMModuleRef module;
 	LLVMBuilderRef builder;
 	LLVMValueRef main_function;
 
-	struct hash_table *defs;
-	struct hash_table *phis;
-
 	LLVMValueRef descriptor_sets[AC_UD_MAX_SETS];
 	LLVMValueRef ring_offsets;
 	LLVMValueRef push_constants;
 	LLVMValueRef num_work_groups;
 	LLVMValueRef workgroup_ids;
 	LLVMValueRef local_invocation_ids;
 	LLVMValueRef tg_size;
 
 	LLVMValueRef vertex_buffers;
 	LLVMValueRef rel_auto_id;
@@ -110,23 +130,20 @@ struct nir_to_llvm_context {
 
 	LLVMValueRef prim_mask;
 	LLVMValueRef sample_pos_offset;
 	LLVMValueRef persp_sample, persp_center, persp_centroid;
 	LLVMValueRef linear_sample, linear_center, linear_centroid;
 	LLVMValueRef front_face;
 	LLVMValueRef ancillary;
 	LLVMValueRef sample_coverage;
 	LLVMValueRef frag_pos[4];
 
-	LLVMBasicBlockRef continue_block;
-	LLVMBasicBlockRef break_block;
-
 	LLVMTypeRef i1;
 	LLVMTypeRef i8;
 	LLVMTypeRef i16;
 	LLVMTypeRef i32;
 	LLVMTypeRef i64;
 	LLVMTypeRef v2i32;
 	LLVMTypeRef v3i32;
 	LLVMTypeRef v4i32;
 	LLVMTypeRef v8i32;
 	LLVMTypeRef f64;
@@ -143,27 +160,24 @@ struct nir_to_llvm_context {
 	LLVMValueRef f32zero;
 	LLVMValueRef f32one;
 	LLVMValueRef v4f32empty;
 
 	unsigned uniform_md_kind;
 	LLVMValueRef empty_md;
 	gl_shader_stage stage;
 
 	LLVMValueRef lds;
 	LLVMValueRef inputs[RADEON_LLVM_MAX_INPUTS * 4];
-	LLVMValueRef outputs[RADEON_LLVM_MAX_OUTPUTS * 4];
 
 	LLVMValueRef shared_memory;
 	uint64_t input_mask;
 	uint64_t output_mask;
-	int num_locals;
-	LLVMValueRef *locals;
 	uint8_t num_output_clips;
 	uint8_t num_output_culls;
 
 	bool has_ds_bpermute;
 
 	bool is_gs_copy_shader;
 	LLVMValueRef gs_next_vertex;
 	unsigned gs_max_out_vertices;
 
 	unsigned tes_primitive_mode;
@@ -1021,80 +1035,80 @@ build_store_values_extended(struct nir_to_llvm_context *ctx,
 	}
 
 	for (i = 0; i < value_count; i++) {
 		LLVMValueRef ptr = values[i * value_stride];
 		LLVMValueRef index = LLVMConstInt(ctx->i32, i, false);
 		LLVMValueRef value = LLVMBuildExtractElement(builder, vec, index, "");
 		LLVMBuildStore(builder, value, ptr);
 	}
 }
 
-static LLVMTypeRef get_def_type(struct nir_to_llvm_context *ctx,
+static LLVMTypeRef get_def_type(struct ac_nir_context *ctx,
                                 const nir_ssa_def *def)
 {
-	LLVMTypeRef type = LLVMIntTypeInContext(ctx->context, def->bit_size);
+	LLVMTypeRef type = LLVMIntTypeInContext(ctx->ac.context, def->bit_size);
 	if (def->num_components > 1) {
 		type = LLVMVectorType(type, def->num_components);
 	}
 	return type;
 }
 
-static LLVMValueRef get_src(struct nir_to_llvm_context *ctx, nir_src src)
+static LLVMValueRef get_src(struct ac_nir_context *nir, nir_src src)
 {
 	assert(src.is_ssa);
-	struct hash_entry *entry = _mesa_hash_table_search(ctx->defs, src.ssa);
+	struct hash_entry *entry = _mesa_hash_table_search(nir->defs, src.ssa);
 	return (LLVMValueRef)entry->data;
 }
 
 
-static LLVMBasicBlockRef get_block(struct nir_to_llvm_context *ctx,
+static LLVMBasicBlockRef get_block(struct ac_nir_context *nir,
                                    const struct nir_block *b)
 {
-	struct hash_entry *entry = _mesa_hash_table_search(ctx->defs, b);
+	struct hash_entry *entry = _mesa_hash_table_search(nir->defs, b);
 	return (LLVMBasicBlockRef)entry->data;
 }
 
-static LLVMValueRef get_alu_src(struct nir_to_llvm_context *ctx,
+static LLVMValueRef get_alu_src(struct ac_nir_context *ctx,
                                 nir_alu_src src,
                                 unsigned num_components)
 {
 	LLVMValueRef value = get_src(ctx, src.src);
 	bool need_swizzle = false;
 
 	assert(value);
 	LLVMTypeRef type = LLVMTypeOf(value);
 	unsigned src_components = LLVMGetTypeKind(type) == LLVMVectorTypeKind
 	                              ? LLVMGetVectorSize(type)
 	                              : 1;
 
 	for (unsigned i = 0; i < num_components; ++i) {
 		assert(src.swizzle[i] < src_components);
 		if (src.swizzle[i] != i)
 			need_swizzle = true;
 	}
 
 	if (need_swizzle || num_components != src_components) {
 		LLVMValueRef masks[] = {
-		    LLVMConstInt(ctx->i32, src.swizzle[0], false),
-		    LLVMConstInt(ctx->i32, src.swizzle[1], false),
-		    LLVMConstInt(ctx->i32, src.swizzle[2], false),
-		    LLVMConstInt(ctx->i32, src.swizzle[3], false)};
+		    LLVMConstInt(ctx->ac.i32, src.swizzle[0], false),
+		    LLVMConstInt(ctx->ac.i32, src.swizzle[1], false),
+		    LLVMConstInt(ctx->ac.i32, src.swizzle[2], false),
+		    LLVMConstInt(ctx->ac.i32, src.swizzle[3], false)};
 
 		if (src_components > 1 && num_components == 1) {
-			value = LLVMBuildExtractElement(ctx->builder, value,
+			value = LLVMBuildExtractElement(ctx->ac.builder, value,
 			                                masks[0], "");
 		} else if (src_components == 1 && num_components > 1) {
 			LLVMValueRef values[] = {value, value, value, value};
 			value = ac_build_gather_values(&ctx->ac, values, num_components);
 		} else {
 			LLVMValueRef swizzle = LLVMConstVector(masks, num_components);
-			value = LLVMBuildShuffleVector(ctx->builder, value, value,
+			value = LLVMBuildShuffleVector(ctx->ac.builder, value, value,
 		                                       swizzle, "");
 		}
 	}
 	assert(!src.negate);
 	assert(!src.abs);
 	return value;
 }
 
 static LLVMValueRef emit_int_cmp(struct ac_llvm_context *ctx,
                                  LLVMIntPredicate pred, LLVMValueRef src0,
@@ -1498,21 +1512,21 @@ static LLVMValueRef emit_ddxy_interp(
 
 	for (i = 0; i < 2; i++) {
 		a = LLVMBuildExtractElement(ctx->builder, interp_ij,
 					    LLVMConstInt(ctx->i32, i, false), "");
 		result[i] = emit_ddxy(ctx, nir_op_fddx, a);
 		result[2+i] = emit_ddxy(ctx, nir_op_fddy, a);
 	}
 	return ac_build_gather_values(&ctx->ac, result, 4);
 }
 
-static void visit_alu(struct nir_to_llvm_context *ctx, const nir_alu_instr *instr)
+static void visit_alu(struct ac_nir_context *ctx, const nir_alu_instr *instr)
 {
 	LLVMValueRef src[4], result = NULL;
 	unsigned num_components = instr->dest.dest.ssa.num_components;
 	unsigned src_components;
 	LLVMTypeRef def_type = get_def_type(ctx, &instr->dest.dest.ssa);
 
 	assert(nir_op_infos[instr->op].num_inputs <= ARRAY_SIZE(src));
 	switch (instr->op) {
 	case nir_op_vec2:
 	case nir_op_vec3:
@@ -1532,114 +1546,114 @@ static void visit_alu(struct nir_to_llvm_context *ctx, const nir_alu_instr *inst
 	for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
 		src[i] = get_alu_src(ctx, instr->src[i], src_components);
 
 	switch (instr->op) {
 	case nir_op_fmov:
 	case nir_op_imov:
 		result = src[0];
 		break;
 	case nir_op_fneg:
 	        src[0] = to_float(&ctx->ac, src[0]);
-		result = LLVMBuildFNeg(ctx->builder, src[0], "");
+		result = LLVMBuildFNeg(ctx->ac.builder, src[0], "");
 		break;
 	case nir_op_ineg:
-		result = LLVMBuildNeg(ctx->builder, src[0], "");
+		result = LLVMBuildNeg(ctx->ac.builder, src[0], "");
 		break;
 	case nir_op_inot:
-		result = LLVMBuildNot(ctx->builder, src[0], "");
+		result = LLVMBuildNot(ctx->ac.builder, src[0], "");
 		break;
 	case nir_op_iadd:
-		result = LLVMBuildAdd(ctx->builder, src[0], src[1], "");
+		result = LLVMBuildAdd(ctx->ac.builder, src[0], src[1], "");
 		break;
 	case nir_op_fadd:
 		src[0] = to_float(&ctx->ac, src[0]);
 		src[1] = to_float(&ctx->ac, src[1]);
-		result = LLVMBuildFAdd(ctx->builder, src[0], src[1], "");
+		result = LLVMBuildFAdd(ctx->ac.builder, src[0], src[1], "");
 		break;
 	case nir_op_fsub:
 		src[0] = to_float(&ctx->ac, src[0]);
 		src[1] = to_float(&ctx->ac, src[1]);
-		result = LLVMBuildFSub(ctx->builder, src[0], src[1], "");
+		result = LLVMBuildFSub(ctx->ac.builder, src[0], src[1], "");
 		break;
 	case nir_op_isub:
-		result = LLVMBuildSub(ctx->builder, src[0], src[1], "");
+		result = LLVMBuildSub(ctx->ac.builder, src[0], src[1], "");
 		break;
 	case nir_op_imul:
-		result = LLVMBuildMul(ctx->builder, src[0], src[1], "");
+		result = LLVMBuildMul(ctx->ac.builder, src[0], src[1], "");
 		break;
 	case nir_op_imod:
-		result = LLVMBuildSRem(ctx->builder, src[0], src[1], "");
+		result = LLVMBuildSRem(ctx->ac.builder, src[0], src[1], "");
 		break;
 	case nir_op_umod:
-		result = LLVMBuildURem(ctx->builder, src[0], src[1], "");
+		result = LLVMBuildURem(ctx->ac.builder, src[0], src[1], "");
 		break;
 	case nir_op_fmod:
 		src[0] = to_float(&ctx->ac, src[0]);
 		src[1] = to_float(&ctx->ac, src[1]);
 		result = ac_build_fdiv(&ctx->ac, src[0], src[1]);
 		result = emit_intrin_1f_param(&ctx->ac, "llvm.floor",
 		                              to_float_type(&ctx->ac, def_type), result);
-		result = LLVMBuildFMul(ctx->builder, src[1] , result, "");
-		result = LLVMBuildFSub(ctx->builder, src[0], result, "");
+		result = LLVMBuildFMul(ctx->ac.builder, src[1] , result, "");
+		result = LLVMBuildFSub(ctx->ac.builder, src[0], result, "");
 		break;
 	case nir_op_frem:
 		src[0] = to_float(&ctx->ac, src[0]);
 		src[1] = to_float(&ctx->ac, src[1]);
-		result = LLVMBuildFRem(ctx->builder, src[0], src[1], "");
+		result = LLVMBuildFRem(ctx->ac.builder, src[0], src[1], "");
 		break;
 	case nir_op_irem:
-		result = LLVMBuildSRem(ctx->builder, src[0], src[1], "");
+		result = LLVMBuildSRem(ctx->ac.builder, src[0], src[1], "");
 		break;
 	case nir_op_idiv:
-		result = LLVMBuildSDiv(ctx->builder, src[0], src[1], "");
+		result = LLVMBuildSDiv(ctx->ac.builder, src[0], src[1], "");
 		break;
 	case nir_op_udiv:
-		result = LLVMBuildUDiv(ctx->builder, src[0], src[1], "");
+		result = LLVMBuildUDiv(ctx->ac.builder, src[0], src[1], "");
 		break;
 	case nir_op_fmul:
 		src[0] = to_float(&ctx->ac, src[0]);
 		src[1] = to_float(&ctx->ac, src[1]);
-		result = LLVMBuildFMul(ctx->builder, src[0], src[1], "");
+		result = LLVMBuildFMul(ctx->ac.builder, src[0], src[1], "");
 		break;
 	case nir_op_fdiv:
 		src[0] = to_float(&ctx->ac, src[0]);
 		src[1] = to_float(&ctx->ac, src[1]);
 		result = ac_build_fdiv(&ctx->ac, src[0], src[1]);
 		break;
 	case nir_op_frcp:
 		src[0] = to_float(&ctx->ac, src[0]);
-		result = ac_build_fdiv(&ctx->ac, ctx->f32one, src[0]);
+		result = ac_build_fdiv(&ctx->ac, ctx->ac.f32_1, src[0]);
 		break;
 	case nir_op_iand:
-		result = LLVMBuildAnd(ctx->builder, src[0], src[1], "");
+		result = LLVMBuildAnd(ctx->ac.builder, src[0], src[1], "");
 		break;
 	case nir_op_ior:
-		result = LLVMBuildOr(ctx->builder, src[0], src[1], "");
+		result = LLVMBuildOr(ctx->ac.builder, src[0], src[1], "");
 		break;
 	case nir_op_ixor:
-		result = LLVMBuildXor(ctx->builder, src[0], src[1], "");
+		result = LLVMBuildXor(ctx->ac.builder, src[0], src[1], "");
 		break;
 	case nir_op_ishl:
-		result = LLVMBuildShl(ctx->builder, src[0],
-				      LLVMBuildZExt(ctx->builder, src[1],
+		result = LLVMBuildShl(ctx->ac.builder, src[0],
+				      LLVMBuildZExt(ctx->ac.builder, src[1],
 						    LLVMTypeOf(src[0]), ""),
 				      "");
 		break;
 	case nir_op_ishr:
-		result = LLVMBuildAShr(ctx->builder, src[0],
-				       LLVMBuildZExt(ctx->builder, src[1],
+		result = LLVMBuildAShr(ctx->ac.builder, src[0],
+				       LLVMBuildZExt(ctx->ac.builder, src[1],
 						     LLVMTypeOf(src[0]), ""),
 				       "");
 		break;
 	case nir_op_ushr:
-		result = LLVMBuildLShr(ctx->builder, src[0],
-				       LLVMBuildZExt(ctx->builder, src[1],
+		result = LLVMBuildLShr(ctx->ac.builder, src[0],
+				       LLVMBuildZExt(ctx->ac.builder, src[1],
 						     LLVMTypeOf(src[0]), ""),
 				       "");
 		break;
 	case nir_op_ilt:
 		result = emit_int_cmp(&ctx->ac, LLVMIntSLT, src[0], src[1]);
 		break;
 	case nir_op_ine:
 		result = emit_int_cmp(&ctx->ac, LLVMIntNE, src[0], src[1]);
 		break;
 	case nir_op_ieq:
@@ -1727,21 +1741,21 @@ static void visit_alu(struct nir_to_llvm_context *ctx, const nir_alu_instr *inst
 		result = emit_intrin_1f_param(&ctx->ac, "llvm.exp2",
 		                              to_float_type(&ctx->ac, def_type), src[0]);
 		break;
 	case nir_op_flog2:
 		result = emit_intrin_1f_param(&ctx->ac, "llvm.log2",
 		                              to_float_type(&ctx->ac, def_type), src[0]);
 		break;
 	case nir_op_frsq:
 		result = emit_intrin_1f_param(&ctx->ac, "llvm.sqrt",
 		                              to_float_type(&ctx->ac, def_type), src[0]);
-		result = ac_build_fdiv(&ctx->ac, ctx->f32one, result);
+		result = ac_build_fdiv(&ctx->ac, ctx->ac.f32_1, result);
 		break;
 	case nir_op_fpow:
 		result = emit_intrin_2f_param(&ctx->ac, "llvm.pow",
 		                              to_float_type(&ctx->ac, def_type), src[0], src[1]);
 		break;
 	case nir_op_fmax:
 		result = emit_intrin_2f_param(&ctx->ac, "llvm.maxnum",
 		                              to_float_type(&ctx->ac, def_type), src[0], src[1]);
 		if (instr->dest.dest.ssa.bit_size == 32)
 			result = emit_intrin_1f_param(&ctx->ac, "llvm.canonicalize",
@@ -1763,69 +1777,69 @@ static void visit_alu(struct nir_to_llvm_context *ctx, const nir_alu_instr *inst
 	case nir_op_ibitfield_extract:
 		result = emit_bitfield_extract(&ctx->ac, true, src);
 		break;
 	case nir_op_ubitfield_extract:
 		result = emit_bitfield_extract(&ctx->ac, false, src);
 		break;
 	case nir_op_bitfield_insert:
 		result = emit_bitfield_insert(&ctx->ac, src[0], src[1], src[2], src[3]);
 		break;
 	case nir_op_bitfield_reverse:
-		result = ac_build_intrinsic(&ctx->ac, "llvm.bitreverse.i32", ctx->i32, src, 1, AC_FUNC_ATTR_READNONE);
+		result = ac_build_intrinsic(&ctx->ac, "llvm.bitreverse.i32", ctx->ac.i32, src, 1, AC_FUNC_ATTR_READNONE);
 		break;
 	case nir_op_bit_count:
-		result = ac_build_intrinsic(&ctx->ac, "llvm.ctpop.i32", ctx->i32, src, 1, AC_FUNC_ATTR_READNONE);
+		result = ac_build_intrinsic(&ctx->ac, "llvm.ctpop.i32", ctx->ac.i32, src, 1, AC_FUNC_ATTR_READNONE);
 		break;
 	case nir_op_vec2:
 	case nir_op_vec3:
 	case nir_op_vec4:
 		for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
 			src[i] = to_integer(&ctx->ac, src[i]);
 		result = ac_build_gather_values(&ctx->ac, src, num_components);
 		break;
 	case nir_op_f2i32:
 	case nir_op_f2i64:
 		src[0] = to_float(&ctx->ac, src[0]);
-		result = LLVMBuildFPToSI(ctx->builder, src[0], def_type, "");
+		result = LLVMBuildFPToSI(ctx->ac.builder, src[0], def_type, "");
 		break;
 	case nir_op_f2u32:
 	case nir_op_f2u64:
 		src[0] = to_float(&ctx->ac, src[0]);
-		result = LLVMBuildFPToUI(ctx->builder, src[0], def_type, "");
+		result = LLVMBuildFPToUI(ctx->ac.builder, src[0], def_type, "");
 		break;
 	case nir_op_i2f32:
 	case nir_op_i2f64:
-		result = LLVMBuildSIToFP(ctx->builder, src[0], to_float_type(&ctx->ac, def_type), "");
+		result = LLVMBuildSIToFP(ctx->ac.builder, src[0], to_float_type(&ctx->ac, def_type), "");
 		break;
 	case nir_op_u2f32:
 	case nir_op_u2f64:
-		result = LLVMBuildUIToFP(ctx->builder, src[0], to_float_type(&ctx->ac, def_type), "");
+		result = LLVMBuildUIToFP(ctx->ac.builder, src[0], to_float_type(&ctx->ac, def_type), "");
 		break;
 	case nir_op_f2f64:
-		result = LLVMBuildFPExt(ctx->builder, src[0], to_float_type(&ctx->ac, def_type), "");
+		result = LLVMBuildFPExt(ctx->ac.builder, src[0], to_float_type(&ctx->ac, def_type), "");
 		break;
 	case nir_op_f2f32:
-		result = LLVMBuildFPTrunc(ctx->builder, src[0], to_float_type(&ctx->ac, def_type), "");
+		result = LLVMBuildFPTrunc(ctx->ac.builder, src[0], to_float_type(&ctx->ac, def_type), "");
 		break;
 	case nir_op_u2u32:
 	case nir_op_u2u64:
 		if (get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < get_elem_bits(&ctx->ac, def_type))
-			result = LLVMBuildZExt(ctx->builder, src[0], def_type, "");
+			result = LLVMBuildZExt(ctx->ac.builder, src[0], def_type, "");
 		else
-			result = LLVMBuildTrunc(ctx->builder, src[0], def_type, "");
+			result = LLVMBuildTrunc(ctx->ac.builder, src[0], def_type, "");
 		break;
 	case nir_op_i2i32:
 	case nir_op_i2i64:
 		if (get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < get_elem_bits(&ctx->ac, def_type))
-			result = LLVMBuildSExt(ctx->builder, src[0], def_type, "");
+			result = LLVMBuildSExt(ctx->ac.builder, src[0], def_type, "");
 		else
-			result = LLVMBuildTrunc(ctx->builder, src[0], def_type, "");
+			result = LLVMBuildTrunc(ctx->ac.builder, src[0], def_type, "");
 		break;
 	case nir_op_bcsel:
 		result = emit_bcsel(&ctx->ac, src[0], src[1], src[2]);
 		break;
 	case nir_op_find_lsb:
 		result = emit_find_lsb(&ctx->ac, src[0]);
 		break;
 	case nir_op_ufind_msb:
 		result = emit_ufind_msb(&ctx->ac, src[0]);
 		break;
@@ -1844,41 +1858,41 @@ static void visit_alu(struct nir_to_llvm_context *ctx, const nir_alu_instr *inst
 	case nir_op_f2b:
 		result = emit_f2b(&ctx->ac, src[0]);
 		break;
 	case nir_op_b2i:
 		result = emit_b2i(&ctx->ac, src[0]);
 		break;
 	case nir_op_i2b:
 		result = emit_i2b(&ctx->ac, src[0]);
 		break;
 	case nir_op_fquantize2f16:
-		result = emit_f2f16(ctx, src[0]);
+		result = emit_f2f16(ctx->nctx, src[0]);
 		break;
 	case nir_op_umul_high:
 		result = emit_umul_high(&ctx->ac, src[0], src[1]);
 		break;
 	case nir_op_imul_high:
 		result = emit_imul_high(&ctx->ac, src[0], src[1]);
 		break;
 	case nir_op_pack_half_2x16:
 		result = emit_pack_half_2x16(&ctx->ac, src[0]);
 		break;
 	case nir_op_unpack_half_2x16:
 		result = emit_unpack_half_2x16(&ctx->ac, src[0]);
 		break;
 	case nir_op_fddx:
 	case nir_op_fddy:
 	case nir_op_fddx_fine:
 	case nir_op_fddy_fine:
 	case nir_op_fddx_coarse:
 	case nir_op_fddy_coarse:
-		result = emit_ddxy(ctx, instr->op, src[0]);
+		result = emit_ddxy(ctx->nctx, instr->op, src[0]);
 		break;
 
 	case nir_op_unpack_64_2x32_split_x: {
 		assert(instr->src[0].src.ssa->num_components == 1);
 		LLVMValueRef tmp = LLVMBuildBitCast(ctx->builder, src[0],
 						    LLVMVectorType(ctx->i32, 2),
 						    "");
 		result = LLVMBuildExtractElement(ctx->builder, tmp,
 						 ctx->i32zero, "");
 		break;
@@ -1912,26 +1926,26 @@ static void visit_alu(struct nir_to_llvm_context *ctx, const nir_alu_instr *inst
 	}
 
 	if (result) {
 		assert(instr->dest.dest.is_ssa);
 		result = to_integer(&ctx->ac, result);
 		_mesa_hash_table_insert(ctx->defs, &instr->dest.dest.ssa,
 		                        result);
 	}
 }
 
-static void visit_load_const(struct nir_to_llvm_context *ctx,
+static void visit_load_const(struct ac_nir_context *ctx,
                              const nir_load_const_instr *instr)
 {
 	LLVMValueRef values[4], value = NULL;
 	LLVMTypeRef element_type =
-	    LLVMIntTypeInContext(ctx->context, instr->def.bit_size);
+	    LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);
 
 	for (unsigned i = 0; i < instr->def.num_components; ++i) {
 		switch (instr->def.bit_size) {
 		case 32:
 			values[i] = LLVMConstInt(element_type,
 			                         instr->value.u32[i], false);
 			break;
 		case 64:
 			values[i] = LLVMConstInt(element_type,
 			                         instr->value.u64[i], false);
@@ -2178,21 +2192,21 @@ static LLVMValueRef build_tex_intrinsic(struct nir_to_llvm_context *ctx,
 		if (stype == GLSL_TYPE_UINT || stype == GLSL_TYPE_INT) {
 			return radv_lower_gather4_integer(ctx, args, instr);
 		}
 	}
 	return ac_build_image_opcode(&ctx->ac, args);
 }
 
 static LLVMValueRef visit_vulkan_resource_index(struct nir_to_llvm_context *ctx,
                                                 nir_intrinsic_instr *instr)
 {
-	LLVMValueRef index = get_src(ctx, instr->src[0]);
+	LLVMValueRef index = get_src(ctx->nir, instr->src[0]);
 	unsigned desc_set = nir_intrinsic_desc_set(instr);
 	unsigned binding = nir_intrinsic_binding(instr);
 	LLVMValueRef desc_ptr = ctx->descriptor_sets[desc_set];
 	struct radv_pipeline_layout *pipeline_layout = ctx->options->layout;
 	struct radv_descriptor_set_layout *layout = pipeline_layout->set[desc_set].layout;
 	unsigned base_offset = layout->binding[binding].offset;
 	LLVMValueRef offset, stride;
 
 	if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
 	    layout->binding[binding].type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
@@ -2214,63 +2228,63 @@ static LLVMValueRef visit_vulkan_resource_index(struct nir_to_llvm_context *ctx,
 
 	return LLVMBuildLoad(ctx->builder, desc_ptr, "");
 }
 
 static LLVMValueRef visit_load_push_constant(struct nir_to_llvm_context *ctx,
                                              nir_intrinsic_instr *instr)
 {
 	LLVMValueRef ptr, addr;
 
 	addr = LLVMConstInt(ctx->i32, nir_intrinsic_base(instr), 0);
-	addr = LLVMBuildAdd(ctx->builder, addr, get_src(ctx, instr->src[0]), "");
+	addr = LLVMBuildAdd(ctx->builder, addr, get_src(ctx->nir, instr->src[0]), "");
 
 	ptr = ac_build_gep0(&ctx->ac, ctx->push_constants, addr);
-	ptr = cast_ptr(ctx, ptr, get_def_type(ctx, &instr->dest.ssa));
+	ptr = cast_ptr(ctx, ptr, get_def_type(ctx->nir, &instr->dest.ssa));
 
 	return LLVMBuildLoad(ctx->builder, ptr, "");
 }
 
 static LLVMValueRef visit_get_buffer_size(struct nir_to_llvm_context *ctx,
                                           const nir_intrinsic_instr *instr)
 {
-	LLVMValueRef desc = get_src(ctx, instr->src[0]);
+	LLVMValueRef desc = get_src(ctx->nir, instr->src[0]);
 
 	return get_buffer_size(ctx, desc, false);
 }
 static void visit_store_ssbo(struct nir_to_llvm_context *ctx,
                              nir_intrinsic_instr *instr)
 {
 	const char *store_name;
-	LLVMValueRef src_data = get_src(ctx, instr->src[0]);
+	LLVMValueRef src_data = get_src(ctx->nir, instr->src[0]);
 	LLVMTypeRef data_type = ctx->f32;
 	int elem_size_mult = get_elem_bits(&ctx->ac, LLVMTypeOf(src_data)) / 32;
 	int components_32bit = elem_size_mult * instr->num_components;
 	unsigned writemask = nir_intrinsic_write_mask(instr);
 	LLVMValueRef base_data, base_offset;
 	LLVMValueRef params[6];
 
 	if (ctx->stage == MESA_SHADER_FRAGMENT)
 		ctx->shader_info->fs.writes_memory = true;
 
-	params[1] = get_src(ctx, instr->src[1]);
+	params[1] = get_src(ctx->nir, instr->src[1]);
 	params[2] = LLVMConstInt(ctx->i32, 0, false); /* vindex */
 	params[4] = ctx->i1false;  /* glc */
 	params[5] = ctx->i1false;  /* slc */
 
 	if (components_32bit > 1)
 		data_type = LLVMVectorType(ctx->f32, components_32bit);
 
 	base_data = to_float(&ctx->ac, src_data);
 	base_data = trim_vector(ctx, base_data, instr->num_components);
 	base_data = LLVMBuildBitCast(ctx->builder, base_data,
 				     data_type, "");
-	base_offset = get_src(ctx, instr->src[2]);      /* voffset */
+	base_offset = get_src(ctx->nir, instr->src[2]);      /* voffset */
 	while (writemask) {
 		int start, count;
 		LLVMValueRef data;
 		LLVMValueRef offset;
 		LLVMValueRef tmp;
 		u_bit_scan_consecutive_range(&writemask, &start, &count);
 
 		/* Due to an LLVM limitation, split 3-element writes
 		 * into a 2-element and a 1-element write. */
 		if (count == 3) {
@@ -2325,26 +2339,26 @@ static void visit_store_ssbo(struct nir_to_llvm_context *ctx,
 static LLVMValueRef visit_atomic_ssbo(struct nir_to_llvm_context *ctx,
                                       const nir_intrinsic_instr *instr)
 {
 	const char *name;
 	LLVMValueRef params[6];
 	int arg_count = 0;
 	if (ctx->stage == MESA_SHADER_FRAGMENT)
 		ctx->shader_info->fs.writes_memory = true;
 
 	if (instr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap) {
-		params[arg_count++] = llvm_extract_elem(ctx, get_src(ctx, instr->src[3]), 0);
+		params[arg_count++] = llvm_extract_elem(ctx, get_src(ctx->nir, instr->src[3]), 0);
 	}
-	params[arg_count++] = llvm_extract_elem(ctx, get_src(ctx, instr->src[2]), 0);
-	params[arg_count++] = get_src(ctx, instr->src[0]);
+	params[arg_count++] = llvm_extract_elem(ctx, get_src(ctx->nir, instr->src[2]), 0);
+	params[arg_count++] = get_src(ctx->nir, instr->src[0]);
 	params[arg_count++] = LLVMConstInt(ctx->i32, 0, false); /* vindex */
-	params[arg_count++] = get_src(ctx, instr->src[1]);      /* voffset */
+	params[arg_count++] = get_src(ctx->nir, instr->src[1]);      /* voffset */
 	params[arg_count++] = ctx->i1false;  /* slc */
 
 	switch (instr->intrinsic) {
 	case nir_intrinsic_ssbo_atomic_add:
 		name = "llvm.amdgcn.buffer.atomic.add";
 		break;
 	case nir_intrinsic_ssbo_atomic_imin:
 		name = "llvm.amdgcn.buffer.atomic.smin";
 		break;
 	case nir_intrinsic_ssbo_atomic_umin:
@@ -2385,38 +2399,38 @@ static LLVMValueRef visit_load_buffer(struct nir_to_llvm_context *ctx,
 	int load_components;
 	int num_components = instr->num_components;
 	if (instr->dest.ssa.bit_size == 64)
 		num_components *= 2;
 
 	for (int i = 0; i < num_components; i += load_components) {
 		load_components = MIN2(num_components - i, 4);
 		const char *load_name;
 		LLVMTypeRef data_type = ctx->f32;
 		LLVMValueRef offset = LLVMConstInt(ctx->i32, i * 4, false);
-		offset = LLVMBuildAdd(ctx->builder, get_src(ctx, instr->src[1]), offset, "");
+		offset = LLVMBuildAdd(ctx->builder, get_src(ctx->nir, instr->src[1]), offset, "");
 
 		if (load_components == 3)
 			data_type = LLVMVectorType(ctx->f32, 4);
 		else if (load_components > 1)
 			data_type = LLVMVectorType(ctx->f32, load_components);
 
 		if (load_components >= 3)
 			load_name = "llvm.amdgcn.buffer.load.v4f32";
 		else if (load_components == 2)
 			load_name = "llvm.amdgcn.buffer.load.v2f32";
 		else if (load_components == 1)
 			load_name = "llvm.amdgcn.buffer.load.f32";
 		else
 			unreachable("unhandled number of components");
 
 		LLVMValueRef params[] = {
-			get_src(ctx, instr->src[0]),
+			get_src(ctx->nir, instr->src[0]),
 			LLVMConstInt(ctx->i32, 0, false),
 			offset,
 			ctx->i1false,
 			ctx->i1false,
 		};
 
 		results[i] = ac_build_intrinsic(&ctx->ac, load_name, data_type, params, 5, 0);
 
 	}
 
@@ -2428,50 +2442,50 @@ static LLVMValueRef visit_load_buffer(struct nir_to_llvm_context *ctx,
 			LLVMConstInt(ctx->i32, 4, false), LLVMConstInt(ctx->i32, 5, false),
 		        LLVMConstInt(ctx->i32, 6, false), LLVMConstInt(ctx->i32, 7, false)
 		};
 
 		LLVMValueRef swizzle = LLVMConstVector(masks, num_components);
 		ret = LLVMBuildShuffleVector(ctx->builder, results[0],
 					     results[num_components > 4 ? 1 : 0], swizzle, "");
 	}
 
 	return LLVMBuildBitCast(ctx->builder, ret,
-	                        get_def_type(ctx, &instr->dest.ssa), "");
+	                        get_def_type(ctx->nir, &instr->dest.ssa), "");
 }
 
 static LLVMValueRef visit_load_ubo_buffer(struct nir_to_llvm_context *ctx,
                                           const nir_intrinsic_instr *instr)
 {
 	LLVMValueRef results[8], ret;
-	LLVMValueRef rsrc = get_src(ctx, instr->src[0]);
-	LLVMValueRef offset = get_src(ctx, instr->src[1]);
+	LLVMValueRef rsrc = get_src(ctx->nir, instr->src[0]);
+	LLVMValueRef offset = get_src(ctx->nir, instr->src[1]);
 	int num_components = instr->num_components;
 
 	if (instr->dest.ssa.bit_size == 64)
 		num_components *= 2;
 
 	for (unsigned i = 0; i < num_components; ++i) {
 		LLVMValueRef params[] = {
 			rsrc,
 			LLVMBuildAdd(ctx->builder, LLVMConstInt(ctx->i32, 4 * i, 0),
 				     offset, "")
 		};
 		results[i] = ac_build_intrinsic(&ctx->ac, "llvm.SI.load.const.v4i32", ctx->f32,
 						params, 2,
 						AC_FUNC_ATTR_READNONE |
 						AC_FUNC_ATTR_LEGACY);
 	}
 
 
 	ret = ac_build_gather_values(&ctx->ac, results, instr->num_components);
 	return LLVMBuildBitCast(ctx->builder, ret,
-	                        get_def_type(ctx, &instr->dest.ssa), "");
+	                        get_def_type(ctx->nir, &instr->dest.ssa), "");
 }
 
 static void
 radv_get_deref_offset(struct nir_to_llvm_context *ctx, nir_deref_var *deref,
 		      bool vs_in, unsigned *vertex_index_out,
 		      LLVMValueRef *vertex_index_ref,
 		      unsigned *const_out, LLVMValueRef *indir_out)
 {
 	unsigned const_offset = 0;
 	nir_deref *tail = &deref->deref;
@@ -2479,21 +2493,21 @@ radv_get_deref_offset(struct nir_to_llvm_context *ctx, nir_deref_var *deref,
 
 	if (vertex_index_out != NULL || vertex_index_ref != NULL) {
 		tail = tail->child;
 		nir_deref_array *deref_array = nir_deref_as_array(tail);
 		if (vertex_index_out)
 			*vertex_index_out = deref_array->base_offset;
 
 		if (vertex_index_ref) {
 			LLVMValueRef vtx = LLVMConstInt(ctx->i32, deref_array->base_offset, false);
 			if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
-				vtx = LLVMBuildAdd(ctx->builder, vtx, get_src(ctx, deref_array->indirect), "");
+				vtx = LLVMBuildAdd(ctx->builder, vtx, get_src(ctx->nir, deref_array->indirect), "");
 			}
 			*vertex_index_ref = vtx;
 		}
 	}
 
 	if (deref->var->data.compact) {
 		assert(tail->child->deref_type == nir_deref_type_array);
 		assert(glsl_type_is_scalar(glsl_without_array(deref->var->type)));
 		nir_deref_array *deref_array = nir_deref_as_array(tail->child);
 		/* We always lower indirect dereferences for "compact" array vars. */
@@ -2510,21 +2524,21 @@ radv_get_deref_offset(struct nir_to_llvm_context *ctx, nir_deref_var *deref,
 		if (tail->deref_type == nir_deref_type_array) {
 			nir_deref_array *deref_array = nir_deref_as_array(tail);
 			LLVMValueRef index, stride, local_offset;
 			unsigned size = glsl_count_attribute_slots(tail->type, vs_in);
 
 			const_offset += size * deref_array->base_offset;
 			if (deref_array->deref_array_type == nir_deref_array_type_direct)
 				continue;
 
 			assert(deref_array->deref_array_type == nir_deref_array_type_indirect);
-			index = get_src(ctx, deref_array->indirect);
+			index = get_src(ctx->nir, deref_array->indirect);
 			stride = LLVMConstInt(ctx->i32, size, 0);
 			local_offset = LLVMBuildMul(ctx->builder, stride, index, "");
 
 			if (offset)
 				offset = LLVMBuildAdd(ctx->builder, offset, local_offset, "");
 			else
 				offset = local_offset;
 		} else if (tail->deref_type == nir_deref_type_struct) {
 			nir_deref_struct *deref_struct = nir_deref_as_struct(tail);
 
@@ -2713,21 +2727,21 @@ load_tcs_input(struct nir_to_llvm_context *ctx,
 	dw_addr = get_tcs_in_current_patch_offset(ctx);
 	dw_addr = get_dw_address(ctx, dw_addr, param, const_index, is_compact, vertex_index, stride,
 				 indir_index);
 
 	for (unsigned i = 0; i < instr->num_components; i++) {
 		value[i] = lds_load(ctx, dw_addr);
 		dw_addr = LLVMBuildAdd(ctx->builder, dw_addr,
 				       ctx->i32one, "");
 	}
 	result = ac_build_gather_values(&ctx->ac, value, instr->num_components);
-	result = LLVMBuildBitCast(ctx->builder, result, get_def_type(ctx, &instr->dest.ssa), "");
+	result = LLVMBuildBitCast(ctx->builder, result, get_def_type(ctx->nir, &instr->dest.ssa), "");
 	return result;
 }
 
 static LLVMValueRef
 load_tcs_output(struct nir_to_llvm_context *ctx,
 	       nir_intrinsic_instr *instr)
 {
 	LLVMValueRef dw_addr, stride;
 	LLVMValueRef value[4], result;
 	LLVMValueRef vertex_index = NULL;
@@ -2750,21 +2764,21 @@ load_tcs_output(struct nir_to_llvm_context *ctx,
 
 	dw_addr = get_dw_address(ctx, dw_addr, param, const_index, is_compact, vertex_index, stride,
 				 indir_index);
 
 	for (unsigned i = 0; i < instr->num_components; i++) {
 		value[i] = lds_load(ctx, dw_addr);
 		dw_addr = LLVMBuildAdd(ctx->builder, dw_addr,
 				       ctx->i32one, "");
 	}
 	result = ac_build_gather_values(&ctx->ac, value, instr->num_components);
-	result = LLVMBuildBitCast(ctx->builder, result, get_def_type(ctx, &instr->dest.ssa), "");
+	result = LLVMBuildBitCast(ctx->builder, result, get_def_type(ctx->nir, &instr->dest.ssa), "");
 	return result;
 }
 
 static void
 store_tcs_output(struct nir_to_llvm_context *ctx,
 		 nir_intrinsic_instr *instr,
 		 LLVMValueRef src,
 		 unsigned writemask)
 {
 	LLVMValueRef stride, dw_addr;
@@ -2851,21 +2865,21 @@ load_tes_input(struct nir_to_llvm_context *ctx,
 	    is_compact && const_index > 3) {
 		const_index -= 3;
 		param++;
 	}
 	buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index,
 						     is_compact, vertex_index, indir_index);
 
 	result = ac_build_buffer_load(&ctx->ac, ctx->hs_ring_tess_offchip, instr->num_components, NULL,
 				      buf_addr, ctx->oc_lds, is_compact ? (4 * const_index) : 0, 1, 0, true, false);
 	result = trim_vector(ctx, result, instr->num_components);
-	result = LLVMBuildBitCast(ctx->builder, result, get_def_type(ctx, &instr->dest.ssa), "");
+	result = LLVMBuildBitCast(ctx->builder, result, get_def_type(ctx->nir, &instr->dest.ssa), "");
 	return result;
 }
 
 static LLVMValueRef
 load_gs_input(struct nir_to_llvm_context *ctx,
 	      nir_intrinsic_instr *instr)
 {
 	LLVMValueRef indir_index, vtx_offset;
 	unsigned const_index;
 	LLVMValueRef args[9];
@@ -2946,49 +2960,49 @@ static LLVMValueRef visit_load_var(struct nir_to_llvm_context *ctx,
 				values[chan] = ctx->inputs[idx + chan + const_index * 4];
 		}
 		break;
 	case nir_var_local:
 		for (unsigned chan = 0; chan < ve; chan++) {
 			if (indir_index) {
 				unsigned count = glsl_count_attribute_slots(
 					instr->variables[0]->var->type, false);
 				count -= chan / 4;
 				LLVMValueRef tmp_vec = ac_build_gather_values_extended(
-						&ctx->ac, ctx->locals + idx + chan, count,
+						&ctx->ac, ctx->nir->locals + idx + chan, count,
 						4, true);
 
 				values[chan] = LLVMBuildExtractElement(ctx->builder,
 								       tmp_vec,
 								       indir_index, "");
 			} else {
-				values[chan] = LLVMBuildLoad(ctx->builder, ctx->locals[idx + chan + const_index * 4], "");
+				values[chan] = LLVMBuildLoad(ctx->builder, ctx->nir->locals[idx + chan + const_index * 4], "");
 			}
 		}
 		break;
 	case nir_var_shader_out:
 		if (ctx->stage == MESA_SHADER_TESS_CTRL)
 			return load_tcs_output(ctx, instr);
 		for (unsigned chan = 0; chan < ve; chan++) {
 			if (indir_index) {
 				unsigned count = glsl_count_attribute_slots(
 						instr->variables[0]->var->type, false);
 				count -= chan / 4;
 				LLVMValueRef tmp_vec = ac_build_gather_values_extended(
-						&ctx->ac, ctx->outputs + idx + chan, count,
+						&ctx->ac, ctx->nir->outputs + idx + chan, count,
 						4, true);
 
 				values[chan] = LLVMBuildExtractElement(ctx->builder,
 								       tmp_vec,
 								       indir_index, "");
 			} else {
 			values[chan] = LLVMBuildLoad(ctx->builder,
-						     ctx->outputs[idx + chan + const_index * 4],
+						     ctx->nir->outputs[idx + chan + const_index * 4],
 						     "");
 			}
 		}
 		break;
 	case nir_var_shared: {
 		LLVMValueRef ptr = get_shared_memory_ptr(ctx, idx, ctx->i32);
 		LLVMValueRef derived_ptr;
 
 		if (indir_index)
 			indir_index = LLVMBuildMul(ctx->builder, indir_index, LLVMConstInt(ctx->i32, 4, false), "");
@@ -3000,30 +3014,30 @@ static LLVMValueRef visit_load_var(struct nir_to_llvm_context *ctx,
 			derived_ptr = LLVMBuildGEP(ctx->builder, ptr, &index, 1, "");
 
 			values[chan] = LLVMBuildLoad(ctx->builder, derived_ptr, "");
 		}
 		break;
 	}
 	default:
 		unreachable("unhandle variable mode");
 	}
 	ret = ac_build_gather_values(&ctx->ac, values, ve);
-	return LLVMBuildBitCast(ctx->builder, ret, get_def_type(ctx, &instr->dest.ssa), "");
+	return LLVMBuildBitCast(ctx->builder, ret, get_def_type(ctx->nir, &instr->dest.ssa), "");
 }
 
 static void
 visit_store_var(struct nir_to_llvm_context *ctx,
 				   nir_intrinsic_instr *instr)
 {
 	LLVMValueRef temp_ptr, value;
 	int idx = instr->variables[0]->var->data.driver_location;
-	LLVMValueRef src = to_float(&ctx->ac, get_src(ctx, instr->src[0]));
+	LLVMValueRef src = to_float(&ctx->ac, get_src(ctx->nir, instr->src[0]));
 	int writemask = instr->const_index[0];
 	LLVMValueRef indir_index;
 	unsigned const_index;
 	radv_get_deref_offset(ctx, instr->variables[0], false,
 	                      NULL, NULL, &const_index, &indir_index);
 
 	if (get_elem_bits(&ctx->ac, LLVMTypeOf(src)) == 64) {
 		int old_writemask = writemask;
 
 		src = LLVMBuildBitCast(ctx->builder, src,
@@ -3052,58 +3066,58 @@ visit_store_var(struct nir_to_llvm_context *ctx,
 
 			value = llvm_extract_elem(ctx, src, chan);
 
 			if (instr->variables[0]->var->data.compact)
 				stride = 1;
 			if (indir_index) {
 				unsigned count = glsl_count_attribute_slots(
 						instr->variables[0]->var->type, false);
 				count -= chan / 4;
 				LLVMValueRef tmp_vec = ac_build_gather_values_extended(
-						&ctx->ac, ctx->outputs + idx + chan, count,
+						&ctx->ac, ctx->nir->outputs + idx + chan, count,
 						stride, true);
 
 				if (get_llvm_num_components(tmp_vec) > 1) {
 					tmp_vec = LLVMBuildInsertElement(ctx->builder, tmp_vec,
 									 value, indir_index, "");
 				} else
 					tmp_vec = value;
-				build_store_values_extended(ctx, ctx->outputs + idx + chan,
+				build_store_values_extended(ctx, ctx->nir->outputs + idx + chan,
 							    count, stride, tmp_vec);
 
 			} else {
-				temp_ptr = ctx->outputs[idx + chan + const_index * stride];
+				temp_ptr = ctx->nir->outputs[idx + chan + const_index * stride];
 
 				LLVMBuildStore(ctx->builder, value, temp_ptr);
 			}
 		}
 		break;
 	case nir_var_local:
 		for (unsigned chan = 0; chan < 8; chan++) {
 			if (!(writemask & (1 << chan)))
 				continue;
 
 			value = llvm_extract_elem(ctx, src, chan);
 			if (indir_index) {
 				unsigned count = glsl_count_attribute_slots(
 					instr->variables[0]->var->type, false);
 				count -= chan / 4;
 				LLVMValueRef tmp_vec = ac_build_gather_values_extended(
-					&ctx->ac, ctx->locals + idx + chan, count,
+					&ctx->ac, ctx->nir->locals + idx + chan, count,
 					4, true);
 
 				tmp_vec = LLVMBuildInsertElement(ctx->builder, tmp_vec,
 								 value, indir_index, "");
-				build_store_values_extended(ctx, ctx->locals + idx + chan,
+				build_store_values_extended(ctx, ctx->nir->locals + idx + chan,
 							    count, 4, tmp_vec);
 			} else {
-				temp_ptr = ctx->locals[idx + chan + const_index * 4];
+				temp_ptr = ctx->nir->locals[idx + chan + const_index * 4];
 
 				LLVMBuildStore(ctx->builder, value, temp_ptr);
 			}
 		}
 		break;
 	case nir_var_shared: {
 		LLVMValueRef ptr = get_shared_memory_ptr(ctx, idx, ctx->i32);
 
 		if (indir_index)
 			indir_index = LLVMBuildMul(ctx->builder, indir_index, LLVMConstInt(ctx->i32, 4, false), "");
@@ -3233,28 +3247,28 @@ static LLVMValueRef adjust_sample_index_using_fmask(struct nir_to_llvm_context *
 	return sample_index;
 }
 
 static LLVMValueRef get_image_coords(struct nir_to_llvm_context *ctx,
 				     const nir_intrinsic_instr *instr)
 {
 	const struct glsl_type *type = instr->variables[0]->var->type;
 	if(instr->variables[0]->deref.child)
 		type = instr->variables[0]->deref.child->type;
 
-	LLVMValueRef src0 = get_src(ctx, instr->src[0]);
+	LLVMValueRef src0 = get_src(ctx->nir, instr->src[0]);
 	LLVMValueRef coords[4];
 	LLVMValueRef masks[] = {
 		LLVMConstInt(ctx->i32, 0, false), LLVMConstInt(ctx->i32, 1, false),
 		LLVMConstInt(ctx->i32, 2, false), LLVMConstInt(ctx->i32, 3, false),
 	};
 	LLVMValueRef res;
-	LLVMValueRef sample_index = llvm_extract_elem(ctx, get_src(ctx, instr->src[1]), 0);
+	LLVMValueRef sample_index = llvm_extract_elem(ctx, get_src(ctx->nir, instr->src[1]), 0);
 
 	int count;
 	enum glsl_sampler_dim dim = glsl_get_sampler_dim(type);
 	bool add_frag_pos = (dim == GLSL_SAMPLER_DIM_SUBPASS ||
 			     dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
 	bool is_ms = (dim == GLSL_SAMPLER_DIM_MS ||
 		      dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
 
 	count = image_type_to_components_count(dim,
 					       glsl_sampler_type_is_array(type));
@@ -3318,21 +3332,21 @@ static LLVMValueRef visit_image_load(struct nir_to_llvm_context *ctx,
 	LLVMValueRef res;
 	char intrinsic_name[64];
 	const nir_variable *var = instr->variables[0]->var;
 	const struct glsl_type *type = var->type;
 	if(instr->variables[0]->deref.child)
 		type = instr->variables[0]->deref.child->type;
 
 	type = glsl_without_array(type);
 	if (glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_BUF) {
 		params[0] = get_sampler_desc(ctx, instr->variables[0], DESC_BUFFER);
-		params[1] = LLVMBuildExtractElement(ctx->builder, get_src(ctx, instr->src[0]),
+		params[1] = LLVMBuildExtractElement(ctx->builder, get_src(ctx->nir, instr->src[0]),
 						    LLVMConstInt(ctx->i32, 0, false), ""); /* vindex */
 		params[2] = LLVMConstInt(ctx->i32, 0, false); /* voffset */
 		params[3] = ctx->i1false;  /* glc */
 		params[4] = ctx->i1false;  /* slc */
 		res = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.buffer.load.format.v4f32", ctx->v4f32,
 					 params, 5, 0);
 
 		res = trim_vector(ctx, res, instr->dest.ssa.num_components);
 		res = to_integer(&ctx->ac, res);
 	} else {
@@ -3375,37 +3389,37 @@ static void visit_image_store(struct nir_to_llvm_context *ctx,
 {
 	LLVMValueRef params[8];
 	char intrinsic_name[64];
 	const nir_variable *var = instr->variables[0]->var;
 	const struct glsl_type *type = glsl_without_array(var->type);
 
 	if (ctx->stage == MESA_SHADER_FRAGMENT)
 		ctx->shader_info->fs.writes_memory = true;
 
 	if (glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_BUF) {
-		params[0] = to_float(&ctx->ac, get_src(ctx, instr->src[2])); /* data */
+		params[0] = to_float(&ctx->ac, get_src(ctx->nir, instr->src[2])); /* data */
 		params[1] = get_sampler_desc(ctx, instr->variables[0], DESC_BUFFER);
-		params[2] = LLVMBuildExtractElement(ctx->builder, get_src(ctx, instr->src[0]),
+		params[2] = LLVMBuildExtractElement(ctx->builder, get_src(ctx->nir, instr->src[0]),
 						    LLVMConstInt(ctx->i32, 0, false), ""); /* vindex */
 		params[3] = LLVMConstInt(ctx->i32, 0, false); /* voffset */
 		params[4] = ctx->i1false;  /* glc */
 		params[5] = ctx->i1false;  /* slc */
 		ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.buffer.store.format.v4f32", ctx->voidt,
 				   params, 6, 0);
 	} else {
 		bool is_da = glsl_sampler_type_is_array(type) ||
 			     glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_CUBE;
 		LLVMValueRef da = is_da ? ctx->i1true : ctx->i1false;
 		LLVMValueRef glc = ctx->i1false;
 		LLVMValueRef slc = ctx->i1false;
 
-		params[0] = to_float(&ctx->ac, get_src(ctx, instr->src[2]));
+		params[0] = to_float(&ctx->ac, get_src(ctx->nir, instr->src[2]));
 		params[1] = get_image_coords(ctx, instr); /* coords */
 		params[2] = get_sampler_desc(ctx, instr->variables[0], DESC_IMAGE);
 		params[3] = LLVMConstInt(ctx->i32, 15, false); /* dmask */
 		if (HAVE_LLVM <= 0x0309) {
 			params[4] = ctx->i1false;  /* r128 */
 			params[5] = da;
 			params[6] = glc;
 			params[7] = slc;
 		} else {
 			LLVMValueRef lwe = ctx->i1false;
@@ -3464,27 +3478,27 @@ static LLVMValueRef visit_image_atomic(struct nir_to_llvm_context *ctx,
 	case nir_intrinsic_image_atomic_exchange:
 		atomic_name = "swap";
 		break;
 	case nir_intrinsic_image_atomic_comp_swap:
 		atomic_name = "cmpswap";
 		break;
 	default:
 		abort();
 	}
 
-	params[param_count++] = get_src(ctx, instr->src[2]);
+	params[param_count++] = get_src(ctx->nir, instr->src[2]);
 	if (instr->intrinsic == nir_intrinsic_image_atomic_comp_swap)
-		params[param_count++] = get_src(ctx, instr->src[3]);
+		params[param_count++] = get_src(ctx->nir, instr->src[3]);
 
 	if (glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_BUF) {
 		params[param_count++] = get_sampler_desc(ctx, instr->variables[0], DESC_BUFFER);
-		params[param_count++] = LLVMBuildExtractElement(ctx->builder, get_src(ctx, instr->src[0]),
+		params[param_count++] = LLVMBuildExtractElement(ctx->builder, get_src(ctx->nir, instr->src[0]),
 								LLVMConstInt(ctx->i32, 0, false), ""); /* vindex */
 		params[param_count++] = ctx->i32zero; /* voffset */
 		params[param_count++] = ctx->i1false;  /* slc */
 
 		length = snprintf(intrinsic_name, sizeof(intrinsic_name),
 				  "llvm.amdgcn.buffer.atomic.%s", atomic_name);
 	} else {
 		char coords_type[8];
 
 		bool da = glsl_sampler_type_is_array(type) ||
@@ -3571,21 +3585,21 @@ static void emit_barrier(struct nir_to_llvm_context *ctx)
 			   ctx->voidt, NULL, 0, AC_FUNC_ATTR_CONVERGENT);
 }
 
 static void emit_discard_if(struct nir_to_llvm_context *ctx,
 			    const nir_intrinsic_instr *instr)
 {
 	LLVMValueRef cond;
 	ctx->shader_info->fs.can_discard = true;
 
 	cond = LLVMBuildICmp(ctx->builder, LLVMIntNE,
-			     get_src(ctx, instr->src[0]),
+			     get_src(ctx->nir, instr->src[0]),
 			     ctx->i32zero, "");
 
 	cond = LLVMBuildSelect(ctx->builder, cond,
 			       LLVMConstReal(ctx->f32, -1.0f),
 			       ctx->f32zero, "");
 	ac_build_kill(&ctx->ac, cond);
 }
 
 static LLVMValueRef
 visit_load_local_invocation_index(struct nir_to_llvm_context *ctx)
@@ -3596,25 +3610,25 @@ visit_load_local_invocation_index(struct nir_to_llvm_context *ctx)
 			      LLVMConstInt(ctx->i32, 0xfc0, false), "");
 
 	return LLVMBuildAdd(ctx->builder, result, thread_id, "");
 }
 
 static LLVMValueRef visit_var_atomic(struct nir_to_llvm_context *ctx,
 				     const nir_intrinsic_instr *instr)
 {
 	LLVMValueRef ptr, result;
 	int idx = instr->variables[0]->var->data.driver_location;
-	LLVMValueRef src = get_src(ctx, instr->src[0]);
+	LLVMValueRef src = get_src(ctx->nir, instr->src[0]);
 	ptr = get_shared_memory_ptr(ctx, idx, ctx->i32);
 
 	if (instr->intrinsic == nir_intrinsic_var_atomic_comp_swap) {
-		LLVMValueRef src1 = get_src(ctx, instr->src[1]);
+		LLVMValueRef src1 = get_src(ctx->nir, instr->src[1]);
 		result = LLVMBuildAtomicCmpXchg(ctx->builder,
 						ptr, src, src1,
 						LLVMAtomicOrderingSequentiallyConsistent,
 						LLVMAtomicOrderingSequentiallyConsistent,
 						false);
 	} else {
 		LLVMAtomicRMWBinOp op;
 		switch (instr->intrinsic) {
 		case nir_intrinsic_var_atomic_add:
 			op = LLVMAtomicRMWBinOpAdd;
@@ -3720,21 +3734,21 @@ static LLVMValueRef visit_interp(struct nir_to_llvm_context *ctx,
 	LLVMValueRef src_c0, src_c1;
 	LLVMValueRef src0;
 	int input_index = instr->variables[0]->var->data.location - VARYING_SLOT_VAR0;
 	switch (instr->intrinsic) {
 	case nir_intrinsic_interp_var_at_centroid:
 		location = INTERP_CENTROID;
 		break;
 	case nir_intrinsic_interp_var_at_sample:
 	case nir_intrinsic_interp_var_at_offset:
 		location = INTERP_CENTER;
-		src0 = get_src(ctx, instr->src[0]);
+		src0 = get_src(ctx->nir, instr->src[0]);
 		break;
 	default:
 		break;
 	}
 
 	if (instr->intrinsic == nir_intrinsic_interp_var_at_offset) {
 		src_c0 = to_float(&ctx->ac, LLVMBuildExtractElement(ctx->builder, src0, ctx->i32zero, ""));
 		src_c1 = to_float(&ctx->ac, LLVMBuildExtractElement(ctx->builder, src0, ctx->i32one, ""));
 	} else if (instr->intrinsic == nir_intrinsic_interp_var_at_sample) {
 		LLVMValueRef sample_position;
@@ -3837,21 +3851,21 @@ visit_emit_vertex(struct nir_to_llvm_context *ctx,
 				 LLVMConstInt(ctx->i32, ctx->gs_max_out_vertices, false), "");
 
 	kill = LLVMBuildSelect(ctx->builder, can_emit,
 			       LLVMConstReal(ctx->f32, 1.0f),
 			       LLVMConstReal(ctx->f32, -1.0f), "");
 	ac_build_kill(&ctx->ac, kill);
 
 	/* loop num outputs */
 	idx = 0;
 	for (unsigned i = 0; i < RADEON_LLVM_MAX_OUTPUTS; ++i) {
-		LLVMValueRef *out_ptr = &ctx->outputs[i * 4];
+		LLVMValueRef *out_ptr = &ctx->nir->outputs[i * 4];
 		int length = 4;
 		int slot = idx;
 		int slot_inc = 1;
 
 		if (!(ctx->output_mask & (1ull << i)))
 			continue;
 
 		if (i == VARYING_SLOT_CLIP_DIST0) {
 			/* pack clip and cull into a single set of slots */
 			length = ctx->num_output_clips + ctx->num_output_culls;
@@ -3899,21 +3913,21 @@ visit_load_tess_coord(struct nir_to_llvm_context *ctx,
 		ctx->f32zero,
 		ctx->f32zero,
 	};
 
 	if (ctx->tes_primitive_mode == GL_TRIANGLES)
 		coord[2] = LLVMBuildFSub(ctx->builder, ctx->f32one,
 					LLVMBuildFAdd(ctx->builder, coord[0], coord[1], ""), "");
 
 	LLVMValueRef result = ac_build_gather_values(&ctx->ac, coord, instr->num_components);
 	return LLVMBuildBitCast(ctx->builder, result,
-				get_def_type(ctx, &instr->dest.ssa), "");
+				get_def_type(ctx->nir, &instr->dest.ssa), "");
 }
 
 static void visit_intrinsic(struct nir_to_llvm_context *ctx,
                             nir_intrinsic_instr *instr)
 {
 	LLVMValueRef result = NULL;
 
 	switch (instr->intrinsic) {
 	case nir_intrinsic_load_work_group_id: {
 		result = ctx->workgroup_ids;
@@ -4080,21 +4094,21 @@ static void visit_intrinsic(struct nir_to_llvm_context *ctx,
 	case nir_intrinsic_load_patch_vertices_in:
 		result = LLVMConstInt(ctx->i32, ctx->options->key.tcs.input_vertices, false);
 		break;
 	default:
 		fprintf(stderr, "Unknown intrinsic: ");
 		nir_print_instr(&instr->instr, stderr);
 		fprintf(stderr, "\n");
 		break;
 	}
 	if (result) {
-		_mesa_hash_table_insert(ctx->defs, &instr->dest.ssa, result);
+		_mesa_hash_table_insert(ctx->nir->defs, &instr->dest.ssa, result);
 	}
 }
 
 static LLVMValueRef get_sampler_desc(struct nir_to_llvm_context *ctx,
 				     const nir_deref_var *deref,
 				     enum desc_type desc_type)
 {
 	unsigned desc_set = deref->var->data.descriptor_set;
 	LLVMValueRef list = ctx->descriptor_sets[desc_set];
 	struct radv_descriptor_set_layout *layout = ctx->options->layout->set[desc_set].layout;
@@ -4134,21 +4148,21 @@ static LLVMValueRef get_sampler_desc(struct nir_to_llvm_context *ctx,
 		unreachable("invalid desc_type\n");
 	}
 
 	if (deref->deref.child) {
 		const nir_deref_array *child =
 			(const nir_deref_array *)deref->deref.child;
 
 		assert(child->deref_array_type != nir_deref_array_type_wildcard);
 		offset += child->base_offset * stride;
 		if (child->deref_array_type == nir_deref_array_type_indirect) {
-			index = get_src(ctx, child->indirect);
+			index = get_src(ctx->nir, child->indirect);
 		}
 
 		constant_index = child->base_offset;
 	}
 	if (desc_type == DESC_SAMPLER && binding->immutable_samplers_offset &&
 	    (!index || binding->immutable_samplers_equal)) {
 		if (binding->immutable_samplers_equal)
 			constant_index = 0;
 
 		const uint32_t *samplers = radv_immutable_samplers(layout, binding);
@@ -4283,53 +4297,53 @@ static void visit_tex(struct nir_to_llvm_context *ctx, nir_tex_instr *instr)
 	LLVMValueRef ddx = NULL, ddy = NULL;
 	LLVMValueRef derivs[6];
 	unsigned chan, count = 0;
 	unsigned const_src = 0, num_deriv_comp = 0;
 	bool lod_is_zero = false;
 	tex_fetch_ptrs(ctx, instr, &res_ptr, &samp_ptr, &fmask_ptr);
 
 	for (unsigned i = 0; i < instr->num_srcs; i++) {
 		switch (instr->src[i].src_type) {
 		case nir_tex_src_coord:
-			coord = get_src(ctx, instr->src[i].src);
+			coord = get_src(ctx->nir, instr->src[i].src);
 			break;
 		case nir_tex_src_projector:
 			break;
 		case nir_tex_src_comparator:
-			comparator = get_src(ctx, instr->src[i].src);
+			comparator = get_src(ctx->nir, instr->src[i].src);
 			break;
 		case nir_tex_src_offset:
-			offsets = get_src(ctx, instr->src[i].src);
+			offsets = get_src(ctx->nir, instr->src[i].src);
 			const_src = i;
 			break;
 		case nir_tex_src_bias:
-			bias = get_src(ctx, instr->src[i].src);
+			bias = get_src(ctx->nir, instr->src[i].src);
 			break;
 		case nir_tex_src_lod: {
 			nir_const_value *val = nir_src_as_const_value(instr->src[i].src);
 
 			if (val && val->i32[0] == 0)
 				lod_is_zero = true;
-			lod = get_src(ctx, instr->src[i].src);
+			lod = get_src(ctx->nir, instr->src[i].src);
 			break;
 		}
 		case nir_tex_src_ms_index:
-			sample_index = get_src(ctx, instr->src[i].src);
+			sample_index = get_src(ctx->nir, instr->src[i].src);
 			break;
 		case nir_tex_src_ms_mcs:
 			break;
 		case nir_tex_src_ddx:
-			ddx = get_src(ctx, instr->src[i].src);
+			ddx = get_src(ctx->nir, instr->src[i].src);
 			num_deriv_comp = instr->src[i].src.ssa->num_components;
 			break;
 		case nir_tex_src_ddy:
-			ddy = get_src(ctx, instr->src[i].src);
+			ddy = get_src(ctx->nir, instr->src[i].src);
 			break;
 		case nir_tex_src_texture_offset:
 		case nir_tex_src_sampler_offset:
 		case nir_tex_src_plane:
 		default:
 			break;
 		}
 	}
 
 	if (instr->op == nir_texop_txs && instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) {
@@ -4547,110 +4561,110 @@ static void visit_tex(struct nir_to_llvm_context *ctx, nir_tex_instr *instr)
 		LLVMValueRef z = LLVMBuildExtractElement(ctx->builder, result, two, "");
 		z = LLVMBuildSDiv(ctx->builder, z, six, "");
 		result = LLVMBuildInsertElement(ctx->builder, result, z, two, "");
 	} else if (instr->dest.ssa.num_components != 4)
 		result = trim_vector(ctx, result, instr->dest.ssa.num_components);
 
 write_result:
 	if (result) {
 		assert(instr->dest.is_ssa);
 		result = to_integer(&ctx->ac, result);
-		_mesa_hash_table_insert(ctx->defs, &instr->dest.ssa, result);
+		_mesa_hash_table_insert(ctx->nir->defs, &instr->dest.ssa, result);
 	}
 }
 
 
-static void visit_phi(struct nir_to_llvm_context *ctx, nir_phi_instr *instr)
+static void visit_phi(struct ac_nir_context *ctx, nir_phi_instr *instr)
 {
 	LLVMTypeRef type = get_def_type(ctx, &instr->dest.ssa);
-	LLVMValueRef result = LLVMBuildPhi(ctx->builder, type, "");
+	LLVMValueRef result = LLVMBuildPhi(ctx->ac.builder, type, "");
 
 	_mesa_hash_table_insert(ctx->defs, &instr->dest.ssa, result);
 	_mesa_hash_table_insert(ctx->phis, instr, result);
 }
 
-static void visit_post_phi(struct nir_to_llvm_context *ctx,
+static void visit_post_phi(struct ac_nir_context *ctx,
                            nir_phi_instr *instr,
                            LLVMValueRef llvm_phi)
 {
 	nir_foreach_phi_src(src, instr) {
 		LLVMBasicBlockRef block = get_block(ctx, src->pred);
 		LLVMValueRef llvm_src = get_src(ctx, src->src);
 
 		LLVMAddIncoming(llvm_phi, &llvm_src, &block, 1);
 	}
 }
 
-static void phi_post_pass(struct nir_to_llvm_context *ctx)
+static void phi_post_pass(struct ac_nir_context *ctx)
 {
 	struct hash_entry *entry;
 	hash_table_foreach(ctx->phis, entry) {
 		visit_post_phi(ctx, (nir_phi_instr*)entry->key,
 		               (LLVMValueRef)entry->data);
 	}
 }
 
 
-static void visit_ssa_undef(struct nir_to_llvm_context *ctx,
+static void visit_ssa_undef(struct ac_nir_context *ctx,
 			    const nir_ssa_undef_instr *instr)
 {
 	unsigned num_components = instr->def.num_components;
 	LLVMValueRef undef;
 
 	if (num_components == 1)
-		undef = LLVMGetUndef(ctx->i32);
+		undef = LLVMGetUndef(ctx->ac.i32);
 	else {
-		undef = LLVMGetUndef(LLVMVectorType(ctx->i32, num_components));
+		undef = LLVMGetUndef(LLVMVectorType(ctx->ac.i32, num_components));
 	}
 	_mesa_hash_table_insert(ctx->defs, &instr->def, undef);
 }
 
-static void visit_jump(struct nir_to_llvm_context *ctx,
+static void visit_jump(struct ac_nir_context *ctx,
 		       const nir_jump_instr *instr)
 {
 	switch (instr->type) {
 	case nir_jump_break:
-		LLVMBuildBr(ctx->builder, ctx->break_block);
-		LLVMClearInsertionPosition(ctx->builder);
+		LLVMBuildBr(ctx->ac.builder, ctx->break_block);
+		LLVMClearInsertionPosition(ctx->ac.builder);
 		break;
 	case nir_jump_continue:
-		LLVMBuildBr(ctx->builder, ctx->continue_block);
-		LLVMClearInsertionPosition(ctx->builder);
+		LLVMBuildBr(ctx->ac.builder, ctx->continue_block);
+		LLVMClearInsertionPosition(ctx->ac.builder);
 		break;
 	default:
 		fprintf(stderr, "Unknown NIR jump instr: ");
 		nir_print_instr(&instr->instr, stderr);
 		fprintf(stderr, "\n");
 		abort();
 	}
 }
 
-static void visit_cf_list(struct nir_to_llvm_context *ctx,
+static void visit_cf_list(struct ac_nir_context *ctx,
                           struct exec_list *list);
 
-static void visit_block(struct nir_to_llvm_context *ctx, nir_block *block)
+static void visit_block(struct ac_nir_context *ctx, nir_block *block)
 {
-	LLVMBasicBlockRef llvm_block = LLVMGetInsertBlock(ctx->builder);
+	LLVMBasicBlockRef llvm_block = LLVMGetInsertBlock(ctx->ac.builder);
 	nir_foreach_instr(instr, block)
 	{
 		switch (instr->type) {
 		case nir_instr_type_alu:
 			visit_alu(ctx, nir_instr_as_alu(instr));
 			break;
 		case nir_instr_type_load_const:
 			visit_load_const(ctx, nir_instr_as_load_const(instr));
 			break;
 		case nir_instr_type_intrinsic:
-			visit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
+			visit_intrinsic(ctx->nctx, nir_instr_as_intrinsic(instr));
 			break;
 		case nir_instr_type_tex:
-			visit_tex(ctx, nir_instr_as_tex(instr));
+			visit_tex(ctx->nctx, nir_instr_as_tex(instr));
 			break;
 		case nir_instr_type_phi:
 			visit_phi(ctx, nir_instr_as_phi(instr));
 			break;
 		case nir_instr_type_ssa_undef:
 			visit_ssa_undef(ctx, nir_instr_as_ssa_undef(instr));
 			break;
 		case nir_instr_type_jump:
 			visit_jump(ctx, nir_instr_as_jump(instr));
 			break;
@@ -4658,75 +4672,77 @@ static void visit_block(struct nir_to_llvm_context *ctx, nir_block *block)
 			fprintf(stderr, "Unknown NIR instr type: ");
 			nir_print_instr(instr, stderr);
 			fprintf(stderr, "\n");
 			abort();
 		}
 	}
 
 	_mesa_hash_table_insert(ctx->defs, block, llvm_block);
 }
 
-static void visit_if(struct nir_to_llvm_context *ctx, nir_if *if_stmt)
+static void visit_if(struct ac_nir_context *ctx, nir_if *if_stmt)
 {
 	LLVMValueRef value = get_src(ctx, if_stmt->condition);
 
+	LLVMValueRef fn = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx->ac.builder));
 	LLVMBasicBlockRef merge_block =
-	    LLVMAppendBasicBlockInContext(ctx->context, ctx->main_function, "");
+	    LLVMAppendBasicBlockInContext(ctx->ac.context, fn, "");
 	LLVMBasicBlockRef if_block =
-	    LLVMAppendBasicBlockInContext(ctx->context, ctx->main_function, "");
+	    LLVMAppendBasicBlockInContext(ctx->ac.context, fn, "");
 	LLVMBasicBlockRef else_block = merge_block;
 	if (!exec_list_is_empty(&if_stmt->else_list))
 		else_block = LLVMAppendBasicBlockInContext(
-		    ctx->context, ctx->main_function, "");
+		    ctx->ac.context, fn, "");
 
-	LLVMValueRef cond = LLVMBuildICmp(ctx->builder, LLVMIntNE, value,
-	                                  LLVMConstInt(ctx->i32, 0, false), "");
-	LLVMBuildCondBr(ctx->builder, cond, if_block, else_block);
+	LLVMValueRef cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntNE, value,
+	                                  LLVMConstInt(ctx->ac.i32, 0, false), "");
+	LLVMBuildCondBr(ctx->ac.builder, cond, if_block, else_block);
 
-	LLVMPositionBuilderAtEnd(ctx->builder, if_block);
+	LLVMPositionBuilderAtEnd(ctx->ac.builder, if_block);
 	visit_cf_list(ctx, &if_stmt->then_list);
-	if (LLVMGetInsertBlock(ctx->builder))
-		LLVMBuildBr(ctx->builder, merge_block);
+	if (LLVMGetInsertBlock(ctx->ac.builder))
+		LLVMBuildBr(ctx->ac.builder, merge_block);
 
 	if (!exec_list_is_empty(&if_stmt->else_list)) {
-		LLVMPositionBuilderAtEnd(ctx->builder, else_block);
+		LLVMPositionBuilderAtEnd(ctx->ac.builder, else_block);
 		visit_cf_list(ctx, &if_stmt->else_list);
-		if (LLVMGetInsertBlock(ctx->builder))
-			LLVMBuildBr(ctx->builder, merge_block);
+		if (LLVMGetInsertBlock(ctx->ac.builder))
+			LLVMBuildBr(ctx->ac.builder, merge_block);
 	}
 
-	LLVMPositionBuilderAtEnd(ctx->builder, merge_block);
+	LLVMPositionBuilderAtEnd(ctx->ac.builder, merge_block);
 }
 
-static void visit_loop(struct nir_to_llvm_context *ctx, nir_loop *loop)
+static void visit_loop(struct ac_nir_context *ctx, nir_loop *loop)
 {
+	LLVMValueRef fn = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx->ac.builder));
 	LLVMBasicBlockRef continue_parent = ctx->continue_block;
 	LLVMBasicBlockRef break_parent = ctx->break_block;
 
 	ctx->continue_block =
-	    LLVMAppendBasicBlockInContext(ctx->context, ctx->main_function, "");
+	    LLVMAppendBasicBlockInContext(ctx->ac.context, fn, "");
 	ctx->break_block =
-	    LLVMAppendBasicBlockInContext(ctx->context, ctx->main_function, "");
+	    LLVMAppendBasicBlockInContext(ctx->ac.context, fn, "");
 
-	LLVMBuildBr(ctx->builder, ctx->continue_block);
-	LLVMPositionBuilderAtEnd(ctx->builder, ctx->continue_block);
+	LLVMBuildBr(ctx->ac.builder, ctx->continue_block);
+	LLVMPositionBuilderAtEnd(ctx->ac.builder, ctx->continue_block);
 	visit_cf_list(ctx, &loop->body);
 
-	if (LLVMGetInsertBlock(ctx->builder))
-		LLVMBuildBr(ctx->builder, ctx->continue_block);
-	LLVMPositionBuilderAtEnd(ctx->builder, ctx->break_block);
+	if (LLVMGetInsertBlock(ctx->ac.builder))
+		LLVMBuildBr(ctx->ac.builder, ctx->continue_block);
+	LLVMPositionBuilderAtEnd(ctx->ac.builder, ctx->break_block);
 
 	ctx->continue_block = continue_parent;
 	ctx->break_block = break_parent;
 }
 
-static void visit_cf_list(struct nir_to_llvm_context *ctx,
+static void visit_cf_list(struct ac_nir_context *ctx,
                           struct exec_list *list)
 {
 	foreach_list_typed(nir_cf_node, node, node, list)
 	{
 		switch (node->type) {
 		case nir_cf_node_block:
 			visit_block(ctx, nir_cf_node_as_block(node));
 			break;
 
 		case nir_cf_node_if:
@@ -4989,46 +5005,46 @@ handle_shader_output_decl(struct nir_to_llvm_context *ctx,
 			if (length > 4)
 				attrib_count = 2;
 			else
 				attrib_count = 1;
 			mask_attribs = 1ull << idx;
 		}
 	}
 
 	for (unsigned i = 0; i < attrib_count; ++i) {
 		for (unsigned chan = 0; chan < 4; chan++) {
-			ctx->outputs[radeon_llvm_reg_index_soa(idx + i, chan)] =
+			ctx->nir->outputs[radeon_llvm_reg_index_soa(idx + i, chan)] =
 		                       si_build_alloca_undef(ctx, ctx->f32, "");
 		}
 	}
 	ctx->output_mask |= mask_attribs;
 }
 
 static void
-setup_locals(struct nir_to_llvm_context *ctx,
+setup_locals(struct ac_nir_context *ctx,
 	     struct nir_function *func)
 {
 	int i, j;
 	ctx->num_locals = 0;
 	nir_foreach_variable(variable, &func->impl->locals) {
 		unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
 		variable->data.driver_location = ctx->num_locals * 4;
 		ctx->num_locals += attrib_count;
 	}
 	ctx->locals = malloc(4 * ctx->num_locals * sizeof(LLVMValueRef));
 	if (!ctx->locals)
 	    return;
 
 	for (i = 0; i < ctx->num_locals; i++) {
 		for (j = 0; j < 4; j++) {
 			ctx->locals[i * 4 + j] =
-				si_build_alloca_undef(ctx, ctx->f32, "temp");
+				si_build_alloca_undef(ctx->nctx, ctx->ac.f32, "temp");
 		}
 	}
 }
 
 static LLVMValueRef
 emit_float_saturate(struct ac_llvm_context *ctx, LLVMValueRef v, float lo, float hi)
 {
 	v = to_float(ctx, v);
 	v = emit_intrin_2f_param(ctx, "llvm.maxnum.f32", ctx->f32, v, LLVMConstReal(ctx->f32, lo));
 	return emit_intrin_2f_param(ctx, "llvm.minnum.f32", ctx->f32, v, LLVMConstReal(ctx->f32, hi));
@@ -5217,21 +5233,21 @@ handle_vs_outputs_post(struct nir_to_llvm_context *ctx,
 	if (ctx->output_mask & (1ull << VARYING_SLOT_CLIP_DIST0)) {
 		LLVMValueRef slots[8];
 		unsigned j;
 
 		if (outinfo->cull_dist_mask)
 			outinfo->cull_dist_mask <<= ctx->num_output_clips;
 
 		i = VARYING_SLOT_CLIP_DIST0;
 		for (j = 0; j < ctx->num_output_clips + ctx->num_output_culls; j++)
 			slots[j] = to_float(&ctx->ac, LLVMBuildLoad(ctx->builder,
-							       ctx->outputs[radeon_llvm_reg_index_soa(i, j)], ""));
+							       ctx->nir->outputs[radeon_llvm_reg_index_soa(i, j)], ""));
 
 		for (i = ctx->num_output_clips + ctx->num_output_culls; i < 8; i++)
 			slots[i] = LLVMGetUndef(ctx->f32);
 
 		if (ctx->num_output_clips + ctx->num_output_culls > 4) {
 			target = V_008DFC_SQ_EXP_POS + 3;
 			si_llvm_init_export_args(ctx, &slots[4], target, &args);
 			memcpy(&pos_args[target - V_008DFC_SQ_EXP_POS],
 			       &args, sizeof(args));
 		}
@@ -5243,21 +5259,21 @@ handle_vs_outputs_post(struct nir_to_llvm_context *ctx,
 
 	}
 
 	for (unsigned i = 0; i < RADEON_LLVM_MAX_OUTPUTS; ++i) {
 		LLVMValueRef values[4];
 		if (!(ctx->output_mask & (1ull << i)))
 			continue;
 
 		for (unsigned j = 0; j < 4; j++)
 			values[j] = to_float(&ctx->ac, LLVMBuildLoad(ctx->builder,
-					      ctx->outputs[radeon_llvm_reg_index_soa(i, j)], ""));
+					      ctx->nir->outputs[radeon_llvm_reg_index_soa(i, j)], ""));
 
 		if (i == VARYING_SLOT_POS) {
 			target = V_008DFC_SQ_EXP_POS;
 		} else if (i == VARYING_SLOT_CLIP_DIST0) {
 			continue;
 		} else if (i == VARYING_SLOT_PSIZ) {
 			outinfo->writes_pointsize = true;
 			psize_value = values[0];
 			continue;
 		} else if (i == VARYING_SLOT_LAYER) {
@@ -5364,21 +5380,21 @@ handle_vs_outputs_post(struct nir_to_llvm_context *ctx,
 	outinfo->param_exports = param_count;
 }
 
 static void
 handle_es_outputs_post(struct nir_to_llvm_context *ctx,
 		       struct ac_es_output_info *outinfo)
 {
 	int j;
 	uint64_t max_output_written = 0;
 	for (unsigned i = 0; i < RADEON_LLVM_MAX_OUTPUTS; ++i) {
-		LLVMValueRef *out_ptr = &ctx->outputs[i * 4];
+		LLVMValueRef *out_ptr = &ctx->nir->outputs[i * 4];
 		int param_index;
 		int length = 4;
 
 		if (!(ctx->output_mask & (1ull << i)))
 			continue;
 
 		if (i == VARYING_SLOT_CLIP_DIST0)
 			length = ctx->num_output_clips + ctx->num_output_culls;
 
 		param_index = shader_io_get_unique_index(i);
@@ -5402,21 +5418,21 @@ handle_es_outputs_post(struct nir_to_llvm_context *ctx,
 
 static void
 handle_ls_outputs_post(struct nir_to_llvm_context *ctx)
 {
 	LLVMValueRef vertex_id = ctx->rel_auto_id;
 	LLVMValueRef vertex_dw_stride = unpack_param(ctx, ctx->ls_out_layout, 13, 8);
 	LLVMValueRef base_dw_addr = LLVMBuildMul(ctx->builder, vertex_id,
 						 vertex_dw_stride, "");
 
 	for (unsigned i = 0; i < RADEON_LLVM_MAX_OUTPUTS; ++i) {
-		LLVMValueRef *out_ptr = &ctx->outputs[i * 4];
+		LLVMValueRef *out_ptr = &ctx->nir->outputs[i * 4];
 		int length = 4;
 
 		if (!(ctx->output_mask & (1ull << i)))
 			continue;
 
 		if (i == VARYING_SLOT_CLIP_DIST0)
 			length = ctx->num_output_clips + ctx->num_output_culls;
 		int param = shader_io_get_unique_index(i);
 		mark_tess_output(ctx, false, param);
 		if (length > 4)
@@ -5736,34 +5752,34 @@ handle_fs_outputs_post(struct nir_to_llvm_context *ctx)
 
 	for (unsigned i = 0; i < RADEON_LLVM_MAX_OUTPUTS; ++i) {
 		LLVMValueRef values[4];
 
 		if (!(ctx->output_mask & (1ull << i)))
 			continue;
 
 		if (i == FRAG_RESULT_DEPTH) {
 			ctx->shader_info->fs.writes_z = true;
 			depth = to_float(&ctx->ac, LLVMBuildLoad(ctx->builder,
-							    ctx->outputs[radeon_llvm_reg_index_soa(i, 0)], ""));
+							    ctx->nir->outputs[radeon_llvm_reg_index_soa(i, 0)], ""));
 		} else if (i == FRAG_RESULT_STENCIL) {
 			ctx->shader_info->fs.writes_stencil = true;
 			stencil = to_float(&ctx->ac, LLVMBuildLoad(ctx->builder,
-							      ctx->outputs[radeon_llvm_reg_index_soa(i, 0)], ""));
+							      ctx->nir->outputs[radeon_llvm_reg_index_soa(i, 0)], ""));
 		} else if (i == FRAG_RESULT_SAMPLE_MASK) {
 			ctx->shader_info->fs.writes_sample_mask = true;
 			samplemask = to_float(&ctx->ac, LLVMBuildLoad(ctx->builder,
-								  ctx->outputs[radeon_llvm_reg_index_soa(i, 0)], ""));
+								  ctx->nir->outputs[radeon_llvm_reg_index_soa(i, 0)], ""));
 		} else {
 			bool last = false;
 			for (unsigned j = 0; j < 4; j++)
 				values[j] = to_float(&ctx->ac, LLVMBuildLoad(ctx->builder,
-									ctx->outputs[radeon_llvm_reg_index_soa(i, j)], ""));
+									ctx->nir->outputs[radeon_llvm_reg_index_soa(i, j)], ""));
 
 			if (!ctx->shader_info->fs.writes_z && !ctx->shader_info->fs.writes_stencil && !ctx->shader_info->fs.writes_sample_mask)
 				last = ctx->output_mask <= ((1ull << (i + 1)) - 1);
 
 			bool ret = si_export_mrt_color(ctx, values, V_008DFC_SQ_EXP_MRT + (i - FRAG_RESULT_DATA0), last, &color_args[index]);
 			if (ret)
 				index++;
 		}
 	}
 
@@ -5932,28 +5948,67 @@ ac_nir_get_max_workgroup_size(enum chip_class chip_class,
 	default:
 		return 0;
 	}
 
 	unsigned max_workgroup_size = nir->info.cs.local_size[0] *
 		nir->info.cs.local_size[1] *
 		nir->info.cs.local_size[2];
 	return max_workgroup_size;
 }
 
+void ac_nir_translate(struct ac_llvm_context *ac, struct ac_shader_abi *abi,
+		      struct nir_shader *nir, struct nir_to_llvm_context *nctx)
+{
+	struct ac_nir_context ctx = {};
+	struct nir_function *func;
+
+	ctx.ac = *ac;
+	ctx.abi = abi;
+
+	ctx.nctx = nctx;
+	if (nctx)
+		nctx->nir = &ctx;
+
+	ctx.stage = nir->stage;
+
+	nir_foreach_variable(variable, &nir->outputs)
+		handle_shader_output_decl(nctx, variable);
+
+	ctx.defs = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
+	                                   _mesa_key_pointer_equal);
+	ctx.phis = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
+	                                   _mesa_key_pointer_equal);
+
+	func = (struct nir_function *)exec_list_get_head(&nir->functions);
+
+	setup_locals(&ctx, func);
+
+	visit_cf_list(&ctx, &func->impl->body);
+	phi_post_pass(&ctx);
+
+	handle_shader_outputs_post(nctx);
+
+	free(ctx.locals);
+	ralloc_free(ctx.defs);
+	ralloc_free(ctx.phis);
+
+	if (nctx)
+		nctx->nir = NULL;
+}
+
 static
 LLVMModuleRef ac_translate_nir_to_llvm(LLVMTargetMachineRef tm,
                                        struct nir_shader *nir,
                                        struct ac_shader_variant_info *shader_info,
                                        const struct ac_nir_compiler_options *options)
 {
 	struct nir_to_llvm_context ctx = {0};
-	struct nir_function *func;
 	unsigned i;
 	ctx.options = options;
 	ctx.shader_info = shader_info;
 	ctx.context = LLVMContextCreate();
 	ctx.module = LLVMModuleCreateWithNameInContext("shader", ctx.context);
 
 	ac_llvm_context_init(&ctx.ac, ctx.context);
 	ctx.ac.module = ctx.module;
 
 	ctx.has_ds_bpermute = ctx.options->chip_class >= VI;
@@ -6009,53 +6064,36 @@ LLVMModuleRef ac_translate_nir_to_llvm(LLVMTargetMachineRef tm,
 	} else if (nir->stage == MESA_SHADER_GEOMETRY) {
 		ctx.gs_next_vertex = ac_build_alloca(&ctx, ctx.i32, "gs_next_vertex");
 
 		ctx.gs_max_out_vertices = nir->info.gs.vertices_out;
 	} else if (nir->stage == MESA_SHADER_TESS_EVAL) {
 		ctx.tes_primitive_mode = nir->info.tess.primitive_mode;
 	}
 
 	ac_setup_rings(&ctx);
 
+	ctx.num_output_clips = nir->info.clip_distance_array_size;
+	ctx.num_output_culls = nir->info.cull_distance_array_size;
+
 	nir_foreach_variable(variable, &nir->inputs)
 		handle_shader_input_decl(&ctx, variable);
 
 	if (nir->stage == MESA_SHADER_FRAGMENT)
 		handle_fs_inputs_pre(&ctx, nir);
 
-	ctx.num_output_clips = nir->info.clip_distance_array_size;
-	ctx.num_output_culls = nir->info.cull_distance_array_size;
+	ac_nir_translate(&ctx.ac, &ctx.abi, nir, &ctx);
 
-	nir_foreach_variable(variable, &nir->outputs)
-		handle_shader_output_decl(&ctx, variable);
-
-	ctx.defs = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
-	                                   _mesa_key_pointer_equal);
-	ctx.phis = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
-	                                   _mesa_key_pointer_equal);
-
-	func = (struct nir_function *)exec_list_get_head(&nir->functions);
-
-	setup_locals(&ctx, func);
-
-	visit_cf_list(&ctx, &func->impl->body);
-	phi_post_pass(&ctx);
-
-	handle_shader_outputs_post(&ctx);
 	LLVMBuildRetVoid(ctx.builder);
 
 	ac_llvm_finalize_module(&ctx);
 
 	ac_nir_eliminate_const_vs_outputs(&ctx);
-	free(ctx.locals);
-	ralloc_free(ctx.defs);
-	ralloc_free(ctx.phis);
 
 	if (nir->stage == MESA_SHADER_GEOMETRY) {
 		unsigned addclip = ctx.num_output_clips + ctx.num_output_culls > 4;
 		shader_info->gs.gsvs_vertex_size = (util_bitcount64(ctx.output_mask) + addclip) * 16;
 		shader_info->gs.max_gsvs_emit_size = shader_info->gs.gsvs_vertex_size *
 			nir->info.gs.vertices_out;
 	} else if (nir->stage == MESA_SHADER_TESS_CTRL) {
 		shader_info->tcs.outputs_written = ctx.tess_outputs_written;
 		shader_info->tcs.patch_outputs_written = ctx.tess_patch_outputs_written;
 	} else if (nir->stage == MESA_SHADER_VERTEX && ctx.options->key.vs.as_ls) {
@@ -6275,21 +6313,21 @@ ac_gs_copy_shader_emit(struct nir_to_llvm_context *ctx)
 					       (slot * 4 + j) *
 					       ctx->gs_max_out_vertices * 16 * 4, false);
 
 			value = ac_build_intrinsic(&ctx->ac,
 						   "llvm.SI.buffer.load.dword.i32.i32",
 						   ctx->i32, args, 9,
 						   AC_FUNC_ATTR_READONLY |
 						   AC_FUNC_ATTR_LEGACY);
 
 			LLVMBuildStore(ctx->builder,
-				       to_float(&ctx->ac, value), ctx->outputs[radeon_llvm_reg_index_soa(i, j)]);
+				       to_float(&ctx->ac, value), ctx->nir->outputs[radeon_llvm_reg_index_soa(i, j)]);
 		}
 		idx += slot_inc;
 	}
 	handle_vs_outputs_post(ctx, false, &ctx->shader_info->vs.outinfo);
 }
 
 void ac_create_gs_copy_shader(LLVMTargetMachineRef tm,
 			      struct nir_shader *geom_shader,
 			      struct ac_shader_binary *binary,
 			      struct ac_shader_config *config,
@@ -6315,23 +6353,32 @@ void ac_create_gs_copy_shader(LLVMTargetMachineRef tm,
 	ctx.stage = MESA_SHADER_VERTEX;
 
 	create_function(&ctx);
 
 	ctx.gs_max_out_vertices = geom_shader->info.gs.vertices_out;
 	ac_setup_rings(&ctx);
 
 	ctx.num_output_clips = geom_shader->info.clip_distance_array_size;
 	ctx.num_output_culls = geom_shader->info.cull_distance_array_size;
 
+	struct ac_nir_context nir_ctx = {};
+	nir_ctx.ac = ctx.ac;
+	nir_ctx.abi = &ctx.abi;
+
+	nir_ctx.nctx = &ctx;
+	ctx.nir = &nir_ctx;
+
 	nir_foreach_variable(variable, &geom_shader->outputs)
 		handle_shader_output_decl(&ctx, variable);
 
 	ac_gs_copy_shader_emit(&ctx);
 
+	ctx.nir = NULL;
+
 	LLVMBuildRetVoid(ctx.builder);
 
 	ac_llvm_finalize_module(&ctx);
 
 	ac_compile_llvm_module(tm, ctx.module, binary, config, shader_info,
 			       MESA_SHADER_VERTEX,
 			       dump_shader, options->supports_spill);
 }
diff --git a/src/amd/common/ac_nir_to_llvm.h b/src/amd/common/ac_nir_to_llvm.h
index 54d5489..791c694 100644
--- a/src/amd/common/ac_nir_to_llvm.h
+++ b/src/amd/common/ac_nir_to_llvm.h
@@ -29,20 +29,22 @@
 #include "llvm-c/TargetMachine.h"
 #include "amd_family.h"
 #include "../vulkan/radv_descriptor_set.h"
 #include "ac_shader_info.h"
 #include "shader_enums.h"
 struct ac_shader_binary;
 struct ac_shader_config;
 struct nir_shader;
 struct radv_pipeline_layout;
 
+struct ac_llvm_context;
+struct ac_shader_abi;
 
 struct ac_vs_variant_key {
 	uint32_t instance_rate_inputs;
 	uint32_t as_es:1;
 	uint32_t as_ls:1;
 	uint32_t export_prim_id:1;
 };
 
 struct ac_tes_variant_key {
 	uint32_t as_es:1;
@@ -214,11 +216,15 @@ void ac_compile_nir_shader(LLVMTargetMachineRef tm,
 			   bool dump_shader);
 
 void ac_create_gs_copy_shader(LLVMTargetMachineRef tm,
 			      struct nir_shader *geom_shader,
 			      struct ac_shader_binary *binary,
 			      struct ac_shader_config *config,
 			      struct ac_shader_variant_info *shader_info,
 			      const struct ac_nir_compiler_options *options,
 			      bool dump_shader);
 
+struct nir_to_llvm_context;
+void ac_nir_translate(struct ac_llvm_context *ac, struct ac_shader_abi *abi,
+		      struct nir_shader *nir, struct nir_to_llvm_context *nctx);
+
 #endif /* AC_NIR_TO_LLVM_H */
-- 
2.9.3