[Mesa-dev] [PATCH 1/2] radeonsi: enumerize DBG flags

Marek Olšák <maraeo at gmail.com>
Sat Oct 7 16:19:43 UTC 2017


From: Marek Olšák <marek.olsak at amd.com>

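Turn the hand-numbered DBG_* mask #defines into an enum of bit indices,
tested through a new DBG(name) macro that expands to the corresponding
64-bit mask. Bit positions are now assigned automatically, so flags can
be added or regrouped without renumbering everything after them. The
shader flags keep their PIPE_SHADER_* values, so si_can_dump_shader()
can still index debug_flags by processor. The r600-only "fs" (fetch
shader) option and the bit range reserved for r600g are dropped.
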
---
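For reviewers, a minimal sketch of the new scheme with the flag list
trimmed (the full enum is in r600_pipe_common.h; PIPE_SHADER_* come
from gallium's p_defines.h):

  enum {
          /* These must keep the PIPE_SHADER_* values so that
           * "debug_flags & (1 << processor)" keeps working. */
          DBG_VS = PIPE_SHADER_VERTEX,
          /* ... */
          DBG_CS = PIPE_SHADER_COMPUTE,
          /* Auto-numbered from here on: */
          DBG_NO_IR,
          DBG_NO_WC,
          /* ... */
  };

  #define DBG(name)        (1ull << DBG_##name)

  /* Old: if (debug_flags & DBG_NO_WC)
   * New: if (debug_flags & DBG(NO_WC)) */
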
 src/gallium/drivers/radeon/r600_buffer_common.c    |   6 +-
 src/gallium/drivers/radeon/r600_pipe_common.c      |  99 +++++++++----------
 src/gallium/drivers/radeon/r600_pipe_common.h      | 110 ++++++++++++---------
 src/gallium/drivers/radeon/r600_texture.c          |  18 ++--
 src/gallium/drivers/radeonsi/si_blit.c             |   2 +-
 src/gallium/drivers/radeonsi/si_debug.c            |   2 +-
 src/gallium/drivers/radeonsi/si_hw_context.c       |   4 +-
 src/gallium/drivers/radeonsi/si_pipe.c             |  38 +++----
 src/gallium/drivers/radeonsi/si_shader.c           |  10 +-
 .../drivers/radeonsi/si_shader_tgsi_setup.c        |   4 +-
 src/gallium/drivers/radeonsi/si_state_draw.c       |   2 +-
 src/gallium/drivers/radeonsi/si_state_shaders.c    |   4 +-
 12 files changed, 155 insertions(+), 144 deletions(-)

diff --git a/src/gallium/drivers/radeon/r600_buffer_common.c b/src/gallium/drivers/radeon/r600_buffer_common.c
index b3e60a4..58b213f 100644
--- a/src/gallium/drivers/radeon/r600_buffer_common.c
+++ b/src/gallium/drivers/radeon/r600_buffer_common.c
@@ -180,21 +180,21 @@ void si_init_resource_fields(struct r600_common_screen *rscreen,
 	 * DRM 3.6.0 has good BO move throttling, so we can allow VRAM-only
 	 * placements even with a low amount of stolen VRAM.
 	 */
 	if (!rscreen->info.has_dedicated_vram &&
 	    (rscreen->info.drm_major < 3 || rscreen->info.drm_minor < 6) &&
 	    res->domains == RADEON_DOMAIN_VRAM) {
 		res->domains = RADEON_DOMAIN_VRAM_GTT;
 		res->flags &= ~RADEON_FLAG_NO_CPU_ACCESS; /* disallowed with VRAM_GTT */
 	}
 
-	if (rscreen->debug_flags & DBG_NO_WC)
+	if (rscreen->debug_flags & DBG(NO_WC))
 		res->flags &= ~RADEON_FLAG_GTT_WC;
 
 	/* Set expected VRAM and GART usage for the buffer. */
 	res->vram_usage = 0;
 	res->gart_usage = 0;
 
 	if (res->domains & RADEON_DOMAIN_VRAM)
 		res->vram_usage = size;
 	else if (res->domains & RADEON_DOMAIN_GTT)
 		res->gart_usage = size;
@@ -224,21 +224,21 @@ bool si_alloc_resource(struct r600_common_screen *rscreen,
 		res->gpu_address = rscreen->ws->buffer_get_virtual_address(res->buf);
 	else
 		res->gpu_address = 0;
 
 	pb_reference(&old_buf, NULL);
 
 	util_range_set_empty(&res->valid_buffer_range);
 	res->TC_L2_dirty = false;
 
 	/* Print debug information. */
-	if (rscreen->debug_flags & DBG_VM && res->b.b.target == PIPE_BUFFER) {
+	if (rscreen->debug_flags & DBG(VM) && res->b.b.target == PIPE_BUFFER) {
 		fprintf(stderr, "VM start=0x%"PRIX64"  end=0x%"PRIX64" | Buffer %"PRIu64" bytes\n",
 			res->gpu_address, res->gpu_address + res->buf->size,
 			res->buf->size);
 	}
 	return true;
 }
 
 static void r600_buffer_destroy(struct pipe_screen *screen,
 				struct pipe_resource *buf)
 {
@@ -407,21 +407,21 @@ static void *r600_buffer_transfer_map(struct pipe_context *ctx,
 		if (r600_invalidate_buffer(rctx, rbuffer)) {
 			/* At this point, the buffer is always idle. */
 			usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
 		} else {
 			/* Fall back to a temporary buffer. */
 			usage |= PIPE_TRANSFER_DISCARD_RANGE;
 		}
 	}
 
 	if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
-	    !(rscreen->debug_flags & DBG_NO_DISCARD_RANGE) &&
+	    !(rscreen->debug_flags & DBG(NO_DISCARD_RANGE)) &&
 	    ((!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
 			 PIPE_TRANSFER_PERSISTENT)) &&
 	      r600_can_dma_copy_buffer(rctx, box->x, 0, box->width)) ||
 	     (rbuffer->flags & RADEON_FLAG_SPARSE))) {
 		assert(usage & PIPE_TRANSFER_WRITE);
 
 		/* Check if mapping this buffer would cause waiting for the GPU.
 		 */
 		if (rbuffer->flags & RADEON_FLAG_SPARSE ||
 		    si_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
diff --git a/src/gallium/drivers/radeon/r600_pipe_common.c b/src/gallium/drivers/radeon/r600_pipe_common.c
index fd4e2c1..1fbf39c 100644
--- a/src/gallium/drivers/radeon/r600_pipe_common.c
+++ b/src/gallium/drivers/radeon/r600_pipe_common.c
@@ -512,21 +512,21 @@ finish:
 	}
 }
 
 static void r600_flush_dma_ring(void *ctx, unsigned flags,
 				struct pipe_fence_handle **fence)
 {
 	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
 	struct radeon_winsys_cs *cs = rctx->dma.cs;
 	struct radeon_saved_cs saved;
 	bool check_vm =
-		(rctx->screen->debug_flags & DBG_CHECK_VM) &&
+		(rctx->screen->debug_flags & DBG(CHECK_VM)) &&
 		rctx->check_vm_faults;
 
 	if (!radeon_emitted(cs, 0)) {
 		if (fence)
 			rctx->ws->fence_reference(fence, rctx->last_sdma_fence);
 		return;
 	}
 
 	if (check_vm)
 		si_save_cs(rctx->ws, cs, &saved, true);
@@ -757,21 +757,21 @@ bool si_common_context_init(struct r600_common_context *rctx,
 
 	rctx->b.const_uploader = u_upload_create(&rctx->b, 128 * 1024,
 						 0, PIPE_USAGE_DEFAULT);
 	if (!rctx->b.const_uploader)
 		return false;
 
 	rctx->ctx = rctx->ws->ctx_create(rctx->ws);
 	if (!rctx->ctx)
 		return false;
 
-	if (rscreen->info.num_sdma_rings && !(rscreen->debug_flags & DBG_NO_ASYNC_DMA)) {
+	if (rscreen->info.num_sdma_rings && !(rscreen->debug_flags & DBG(NO_ASYNC_DMA))) {
 		rctx->dma.cs = rctx->ws->cs_create(rctx->ctx, RING_DMA,
 						   r600_flush_dma_ring,
 						   rctx);
 		rctx->dma.flush = r600_flush_dma_ring;
 	}
 
 	return true;
 }
 
 void si_common_context_cleanup(struct r600_common_context *rctx)
@@ -815,68 +815,67 @@ void si_common_context_cleanup(struct r600_common_context *rctx)
 	rctx->ws->fence_reference(&rctx->last_sdma_fence, NULL);
 	r600_resource_reference(&rctx->eop_bug_scratch, NULL);
 }
 
 /*
  * pipe_screen
  */
 
 static const struct debug_named_value common_debug_options[] = {
 	/* logging */
-	{ "tex", DBG_TEX, "Print texture info" },
-	{ "nir", DBG_NIR, "Enable experimental NIR shaders" },
-	{ "compute", DBG_COMPUTE, "Print compute info" },
-	{ "vm", DBG_VM, "Print virtual addresses when creating resources" },
-	{ "info", DBG_INFO, "Print driver information" },
+	{ "tex", DBG(TEX), "Print texture info" },
+	{ "nir", DBG(NIR), "Enable experimental NIR shaders" },
+	{ "compute", DBG(COMPUTE), "Print compute info" },
+	{ "vm", DBG(VM), "Print virtual addresses when creating resources" },
+	{ "info", DBG(INFO), "Print driver information" },
 
 	/* shaders */
-	{ "fs", DBG_FS, "Print fetch shaders" },
-	{ "vs", DBG_VS, "Print vertex shaders" },
-	{ "gs", DBG_GS, "Print geometry shaders" },
-	{ "ps", DBG_PS, "Print pixel shaders" },
-	{ "cs", DBG_CS, "Print compute shaders" },
-	{ "tcs", DBG_TCS, "Print tessellation control shaders" },
-	{ "tes", DBG_TES, "Print tessellation evaluation shaders" },
-	{ "noir", DBG_NO_IR, "Don't print the LLVM IR"},
-	{ "notgsi", DBG_NO_TGSI, "Don't print the TGSI"},
-	{ "noasm", DBG_NO_ASM, "Don't print disassembled shaders"},
-	{ "preoptir", DBG_PREOPT_IR, "Print the LLVM IR before initial optimizations" },
-	{ "checkir", DBG_CHECK_IR, "Enable additional sanity checks on shader IR" },
-	{ "nooptvariant", DBG_NO_OPT_VARIANT, "Disable compiling optimized shader variants." },
-
-	{ "testdma", DBG_TEST_DMA, "Invoke SDMA tests and exit." },
-	{ "testvmfaultcp", DBG_TEST_VMFAULT_CP, "Invoke a CP VM fault test and exit." },
-	{ "testvmfaultsdma", DBG_TEST_VMFAULT_SDMA, "Invoke a SDMA VM fault test and exit." },
-	{ "testvmfaultshader", DBG_TEST_VMFAULT_SHADER, "Invoke a shader VM fault test and exit." },
+	{ "vs", DBG(VS), "Print vertex shaders" },
+	{ "gs", DBG(GS), "Print geometry shaders" },
+	{ "ps", DBG(PS), "Print pixel shaders" },
+	{ "cs", DBG(CS), "Print compute shaders" },
+	{ "tcs", DBG(TCS), "Print tessellation control shaders" },
+	{ "tes", DBG(TES), "Print tessellation evaluation shaders" },
+	{ "noir", DBG(NO_IR), "Don't print the LLVM IR"},
+	{ "notgsi", DBG(NO_TGSI), "Don't print the TGSI"},
+	{ "noasm", DBG(NO_ASM), "Don't print disassembled shaders"},
+	{ "preoptir", DBG(PREOPT_IR), "Print the LLVM IR before initial optimizations" },
+	{ "checkir", DBG(CHECK_IR), "Enable additional sanity checks on shader IR" },
+	{ "nooptvariant", DBG(NO_OPT_VARIANT), "Disable compiling optimized shader variants." },
+
+	{ "testdma", DBG(TEST_DMA), "Invoke SDMA tests and exit." },
+	{ "testvmfaultcp", DBG(TEST_VMFAULT_CP), "Invoke a CP VM fault test and exit." },
+	{ "testvmfaultsdma", DBG(TEST_VMFAULT_SDMA), "Invoke a SDMA VM fault test and exit." },
+	{ "testvmfaultshader", DBG(TEST_VMFAULT_SHADER), "Invoke a shader VM fault test and exit." },
 
 	/* features */
-	{ "nodma", DBG_NO_ASYNC_DMA, "Disable asynchronous DMA" },
-	{ "nohyperz", DBG_NO_HYPERZ, "Disable Hyper-Z" },
+	{ "nodma", DBG(NO_ASYNC_DMA), "Disable asynchronous DMA" },
+	{ "nohyperz", DBG(NO_HYPERZ), "Disable Hyper-Z" },
 	/* GL uses the word INVALIDATE, gallium uses the word DISCARD */
-	{ "noinvalrange", DBG_NO_DISCARD_RANGE, "Disable handling of INVALIDATE_RANGE map flags" },
-	{ "no2d", DBG_NO_2D_TILING, "Disable 2D tiling" },
-	{ "notiling", DBG_NO_TILING, "Disable tiling" },
-	{ "switch_on_eop", DBG_SWITCH_ON_EOP, "Program WD/IA to switch on end-of-packet." },
-	{ "forcedma", DBG_FORCE_DMA, "Use asynchronous DMA for all operations when possible." },
-	{ "precompile", DBG_PRECOMPILE, "Compile one shader variant at shader creation." },
-	{ "nowc", DBG_NO_WC, "Disable GTT write combining" },
-	{ "check_vm", DBG_CHECK_VM, "Check VM faults and dump debug info." },
-	{ "nodcc", DBG_NO_DCC, "Disable DCC." },
-	{ "nodccclear", DBG_NO_DCC_CLEAR, "Disable DCC fast clear." },
-	{ "norbplus", DBG_NO_RB_PLUS, "Disable RB+." },
-	{ "sisched", DBG_SI_SCHED, "Enable LLVM SI Machine Instruction Scheduler." },
-	{ "mono", DBG_MONOLITHIC_SHADERS, "Use old-style monolithic shaders compiled on demand" },
-	{ "unsafemath", DBG_UNSAFE_MATH, "Enable unsafe math shader optimizations" },
-	{ "nodccfb", DBG_NO_DCC_FB, "Disable separate DCC on the main framebuffer" },
-	{ "nodpbb", DBG_NO_DPBB, "Disable DPBB." },
-	{ "nodfsm", DBG_NO_DFSM, "Disable DFSM." },
-	{ "nooutoforder", DBG_NO_OUT_OF_ORDER, "Disable out-of-order rasterization" },
+	{ "noinvalrange", DBG(NO_DISCARD_RANGE), "Disable handling of INVALIDATE_RANGE map flags" },
+	{ "no2d", DBG(NO_2D_TILING), "Disable 2D tiling" },
+	{ "notiling", DBG(NO_TILING), "Disable tiling" },
+	{ "switch_on_eop", DBG(SWITCH_ON_EOP), "Program WD/IA to switch on end-of-packet." },
+	{ "forcedma", DBG(FORCE_DMA), "Use asynchronous DMA for all operations when possible." },
+	{ "precompile", DBG(PRECOMPILE), "Compile one shader variant at shader creation." },
+	{ "nowc", DBG(NO_WC), "Disable GTT write combining" },
+	{ "check_vm", DBG(CHECK_VM), "Check VM faults and dump debug info." },
+	{ "nodcc", DBG(NO_DCC), "Disable DCC." },
+	{ "nodccclear", DBG(NO_DCC_CLEAR), "Disable DCC fast clear." },
+	{ "norbplus", DBG(NO_RB_PLUS), "Disable RB+." },
+	{ "sisched", DBG(SI_SCHED), "Enable LLVM SI Machine Instruction Scheduler." },
+	{ "mono", DBG(MONOLITHIC_SHADERS), "Use old-style monolithic shaders compiled on demand" },
+	{ "unsafemath", DBG(UNSAFE_MATH), "Enable unsafe math shader optimizations" },
+	{ "nodccfb", DBG(NO_DCC_FB), "Disable separate DCC on the main framebuffer" },
+	{ "nodpbb", DBG(NO_DPBB), "Disable DPBB." },
+	{ "nodfsm", DBG(NO_DFSM), "Disable DFSM." },
+	{ "nooutoforder", DBG(NO_OUT_OF_ORDER), "Disable out-of-order rasterization" },
 
 	DEBUG_NAMED_VALUE_END /* must be last */
 };
 
 static const char* r600_get_vendor(struct pipe_screen* pscreen)
 {
 	return "X.Org";
 }
 
 static const char* r600_get_device_vendor(struct pipe_screen* pscreen)
@@ -934,23 +933,23 @@ static void r600_disk_cache_create(struct r600_common_screen *rscreen)
 		if (disk_cache_get_function_timestamp(LLVMInitializeAMDGPUTargetInfo,
 						      &llvm_timestamp)) {
 			res = asprintf(&timestamp_str, "%u_%u",
 				       mesa_timestamp, llvm_timestamp);
 		}
 
 		if (res != -1) {
 			/* These flags affect shader compilation. */
 			uint64_t shader_debug_flags =
 				rscreen->debug_flags &
-				(DBG_FS_CORRECT_DERIVS_AFTER_KILL |
-				 DBG_SI_SCHED |
-				 DBG_UNSAFE_MATH);
+				(DBG(FS_CORRECT_DERIVS_AFTER_KILL) |
+				 DBG(SI_SCHED) |
+				 DBG(UNSAFE_MATH));
 
 			rscreen->disk_shader_cache =
 				disk_cache_create(r600_get_family_name(rscreen),
 						  timestamp_str,
 						  shader_debug_flags);
 			free(timestamp_str);
 		}
 	}
 }
 
@@ -1410,21 +1409,21 @@ bool si_common_screen_init(struct r600_common_screen *rscreen,
 	rscreen->force_aniso = MIN2(16, debug_get_num_option("R600_TEX_ANISO", -1));
 	if (rscreen->force_aniso >= 0) {
 		printf("radeon: Forcing anisotropy filter to %ix\n",
 		       /* round down to a power of two */
 		       1 << util_logbase2(rscreen->force_aniso));
 	}
 
 	(void) mtx_init(&rscreen->aux_context_lock, mtx_plain);
 	(void) mtx_init(&rscreen->gpu_load_mutex, mtx_plain);
 
-	if (rscreen->debug_flags & DBG_INFO) {
+	if (rscreen->debug_flags & DBG(INFO)) {
 		printf("pci (domain:bus:dev.func): %04x:%02x:%02x.%x\n",
 		       rscreen->info.pci_domain, rscreen->info.pci_bus,
 		       rscreen->info.pci_dev, rscreen->info.pci_func);
 		printf("pci_id = 0x%x\n", rscreen->info.pci_id);
 		printf("family = %i (%s)\n", rscreen->info.family,
 		       r600_get_family_name(rscreen));
 		printf("chip_class = %i\n", rscreen->info.chip_class);
 		printf("pte_fragment_size = %u\n", rscreen->info.pte_fragment_size);
 		printf("gart_page_size = %u\n", rscreen->info.gart_page_size);
 		printf("gart_size = %i MB\n", (int)DIV_ROUND_UP(rscreen->info.gart_size, 1024*1024));
@@ -1490,21 +1489,21 @@ void si_destroy_common_screen(struct r600_common_screen *rscreen)
 }
 
 bool si_can_dump_shader(struct r600_common_screen *rscreen,
 			unsigned processor)
 {
 	return rscreen->debug_flags & (1 << processor);
 }
 
 bool si_extra_shader_checks(struct r600_common_screen *rscreen, unsigned processor)
 {
-	return (rscreen->debug_flags & DBG_CHECK_IR) ||
+	return (rscreen->debug_flags & DBG(CHECK_IR)) ||
 	       si_can_dump_shader(rscreen, processor);
 }
 
 void si_screen_clear_buffer(struct r600_common_screen *rscreen, struct pipe_resource *dst,
 			    uint64_t offset, uint64_t size, unsigned value)
 {
 	struct r600_common_context *rctx = (struct r600_common_context*)rscreen->aux_context;
 
 	mtx_lock(&rscreen->aux_context_lock);
 	rctx->dma_clear_buffer(&rctx->b, dst, offset, size, value);
diff --git a/src/gallium/drivers/radeon/r600_pipe_common.h b/src/gallium/drivers/radeon/r600_pipe_common.h
index 8530798..8c08eda 100644
--- a/src/gallium/drivers/radeon/r600_pipe_common.h
+++ b/src/gallium/drivers/radeon/r600_pipe_common.h
@@ -63,68 +63,80 @@ struct u_log_context;
 #define R600_CONTEXT_STOP_PIPELINE_STATS	(1u << 2)
 #define R600_CONTEXT_FLUSH_FOR_RENDER_COND	(1u << 3)
 #define R600_CONTEXT_PRIVATE_FLAG		(1u << 4)
 
 /* special primitive types */
 #define R600_PRIM_RECTANGLE_LIST	PIPE_PRIM_MAX
 
 #define R600_NOT_QUERY		0xffffffff
 
 /* Debug flags. */
-#define DBG_VS			(1 << PIPE_SHADER_VERTEX)
-#define DBG_PS			(1 << PIPE_SHADER_FRAGMENT)
-#define DBG_GS			(1 << PIPE_SHADER_GEOMETRY)
-#define DBG_TCS			(1 << PIPE_SHADER_TESS_CTRL)
-#define DBG_TES			(1 << PIPE_SHADER_TESS_EVAL)
-#define DBG_CS			(1 << PIPE_SHADER_COMPUTE)
-#define DBG_ALL_SHADERS		(DBG_FS - 1)
-#define DBG_FS			(1 << 6) /* fetch shader */
-#define DBG_TEX			(1 << 7)
-#define DBG_NIR			(1 << 8)
-#define DBG_COMPUTE		(1 << 9)
-/* gap */
-#define DBG_VM			(1 << 11)
-#define DBG_NO_IR		(1 << 12)
-#define DBG_NO_TGSI		(1 << 13)
-#define DBG_NO_ASM		(1 << 14)
-#define DBG_PREOPT_IR		(1 << 15)
-#define DBG_CHECK_IR		(1 << 16)
-#define DBG_NO_OPT_VARIANT	(1 << 17)
-#define DBG_FS_CORRECT_DERIVS_AFTER_KILL (1 << 18)
-/* gaps */
-#define DBG_TEST_DMA		(1 << 20)
-/* Bits 21-31 are reserved for the r600g driver. */
-/* features */
-#define DBG_NO_ASYNC_DMA	(1ull << 32)
-#define DBG_NO_HYPERZ		(1ull << 33)
-#define DBG_NO_DISCARD_RANGE	(1ull << 34)
-#define DBG_NO_2D_TILING	(1ull << 35)
-#define DBG_NO_TILING		(1ull << 36)
-#define DBG_SWITCH_ON_EOP	(1ull << 37)
-#define DBG_FORCE_DMA		(1ull << 38)
-#define DBG_PRECOMPILE		(1ull << 39)
-#define DBG_INFO		(1ull << 40)
-#define DBG_NO_WC		(1ull << 41)
-#define DBG_CHECK_VM		(1ull << 42)
-#define DBG_NO_DCC		(1ull << 43)
-#define DBG_NO_DCC_CLEAR	(1ull << 44)
-#define DBG_NO_RB_PLUS		(1ull << 45)
-#define DBG_SI_SCHED		(1ull << 46)
-#define DBG_MONOLITHIC_SHADERS	(1ull << 47)
-#define DBG_NO_OUT_OF_ORDER	(1ull << 48)
-#define DBG_UNSAFE_MATH		(1ull << 49)
-#define DBG_NO_DCC_FB		(1ull << 50)
-#define DBG_TEST_VMFAULT_CP	(1ull << 51)
-#define DBG_TEST_VMFAULT_SDMA	(1ull << 52)
-#define DBG_TEST_VMFAULT_SHADER	(1ull << 53)
-#define DBG_NO_DPBB		(1ull << 54)
-#define DBG_NO_DFSM		(1ull << 55)
+enum {
+	/* Shader logging options: */
+	DBG_VS = PIPE_SHADER_VERTEX,
+	DBG_PS = PIPE_SHADER_FRAGMENT,
+	DBG_GS = PIPE_SHADER_GEOMETRY,
+	DBG_TCS = PIPE_SHADER_TESS_CTRL,
+	DBG_TES = PIPE_SHADER_TESS_EVAL,
+	DBG_CS = PIPE_SHADER_COMPUTE,
+	DBG_NO_IR,
+	DBG_NO_TGSI,
+	DBG_NO_ASM,
+	DBG_PREOPT_IR,
+
+	/* Shader compiler options the shader cache should be aware of: */
+	DBG_FS_CORRECT_DERIVS_AFTER_KILL,
+	DBG_UNSAFE_MATH,
+	DBG_SI_SCHED,
+
+	/* Shader compiler options (with no effect on the shader cache): */
+	DBG_CHECK_IR,
+	DBG_PRECOMPILE,
+	DBG_NIR,
+	DBG_MONOLITHIC_SHADERS,
+	DBG_NO_OPT_VARIANT,
+
+	/* Information logging options: */
+	DBG_INFO,
+	DBG_TEX,
+	DBG_COMPUTE,
+	DBG_VM,
+
+	/* Driver options: */
+	DBG_FORCE_DMA,
+	DBG_NO_ASYNC_DMA,
+	DBG_NO_DISCARD_RANGE,
+	DBG_NO_WC,
+	DBG_CHECK_VM,
+
+	/* 3D engine options: */
+	DBG_SWITCH_ON_EOP,
+	DBG_NO_OUT_OF_ORDER,
+	DBG_NO_DPBB,
+	DBG_NO_DFSM,
+	DBG_NO_HYPERZ,
+	DBG_NO_RB_PLUS,
+	DBG_NO_2D_TILING,
+	DBG_NO_TILING,
+	DBG_NO_DCC,
+	DBG_NO_DCC_CLEAR,
+	DBG_NO_DCC_FB,
+
+	/* Tests: */
+	DBG_TEST_DMA,
+	DBG_TEST_VMFAULT_CP,
+	DBG_TEST_VMFAULT_SDMA,
+	DBG_TEST_VMFAULT_SHADER,
+};
+
+#define DBG_ALL_SHADERS		(((1 << (DBG_CS + 1)) - 1))
+#define DBG(name)		(1ull << DBG_##name)
 
 #define R600_MAP_BUFFER_ALIGNMENT 64
 
 #define SI_MAX_VARIABLE_THREADS_PER_BLOCK 1024
 
 enum r600_coherency {
 	R600_COHERENCY_NONE, /* no cache flushes needed */
 	R600_COHERENCY_SHADER,
 	R600_COHERENCY_CB_META,
 };
@@ -958,21 +970,21 @@ r600_htile_enabled(struct r600_texture *tex, unsigned level)
 
 static inline bool
 vi_tc_compat_htile_enabled(struct r600_texture *tex, unsigned level)
 {
 	assert(!tex->tc_compatible_htile || tex->htile_offset);
 	return tex->tc_compatible_htile && level == 0;
 }
 
 #define COMPUTE_DBG(rscreen, fmt, args...) \
 	do { \
-		if ((rscreen->b.debug_flags & DBG_COMPUTE)) fprintf(stderr, fmt, ##args); \
+		if ((rscreen->b.debug_flags & DBG(COMPUTE))) fprintf(stderr, fmt, ##args); \
 	} while (0);
 
 #define R600_ERR(fmt, args...) \
 	fprintf(stderr, "EE %s:%d %s - " fmt, __FILE__, __LINE__, __func__, ##args)
 
 /* For MSAA sample positions. */
 #define FILL_SREG(s0x, s0y, s1x, s1y, s2x, s2y, s3x, s3y)  \
 	(((s0x) & 0xf) | (((unsigned)(s0y) & 0xf) << 4) |		   \
 	(((unsigned)(s1x) & 0xf) << 8) | (((unsigned)(s1y) & 0xf) << 12) |	   \
 	(((unsigned)(s2x) & 0xf) << 16) | (((unsigned)(s2y) & 0xf) << 20) |	   \
diff --git a/src/gallium/drivers/radeon/r600_texture.c b/src/gallium/drivers/radeon/r600_texture.c
index 4167194..d9dc10b 100644
--- a/src/gallium/drivers/radeon/r600_texture.c
+++ b/src/gallium/drivers/radeon/r600_texture.c
@@ -1177,42 +1177,42 @@ r600_texture_create_object(struct pipe_screen *screen,
 			rtex->can_sample_s = true;
 		} else {
 			rtex->can_sample_z = !rtex->surface.u.legacy.depth_adjusted;
 			rtex->can_sample_s = !rtex->surface.u.legacy.stencil_adjusted;
 		}
 
 		if (!(base->flags & (R600_RESOURCE_FLAG_TRANSFER |
 				     R600_RESOURCE_FLAG_FLUSHED_DEPTH))) {
 			rtex->db_compatible = true;
 
-			if (!(rscreen->debug_flags & DBG_NO_HYPERZ))
+			if (!(rscreen->debug_flags & DBG(NO_HYPERZ)))
 				r600_texture_allocate_htile(rscreen, rtex);
 		}
 	} else {
 		if (base->nr_samples > 1) {
 			if (!buf) {
 				r600_texture_allocate_fmask(rscreen, rtex);
 				r600_texture_allocate_cmask(rscreen, rtex);
 				rtex->cmask_buffer = &rtex->resource;
 			}
 			if (!rtex->fmask.size || !rtex->cmask.size) {
 				FREE(rtex);
 				return NULL;
 			}
 		}
 
 		/* Shared textures must always set up DCC here.
 		 * If it's not present, it will be disabled by
 		 * apply_opaque_metadata later.
 		 */
 		if (rtex->surface.dcc_size &&
-		    (buf || !(rscreen->debug_flags & DBG_NO_DCC)) &&
+		    (buf || !(rscreen->debug_flags & DBG(NO_DCC))) &&
 		    !(rtex->surface.flags & RADEON_SURF_SCANOUT)) {
 			/* Reserve space for the DCC buffer. */
 			rtex->dcc_offset = align64(rtex->size, rtex->surface.dcc_alignment);
 			rtex->size = rtex->dcc_offset + rtex->surface.dcc_size;
 		}
 	}
 
 	/* Now create the backing buffer. */
 	if (!buf) {
 		si_init_resource_fields(rscreen, resource, rtex->size,
@@ -1257,29 +1257,29 @@ r600_texture_create_object(struct pipe_screen *screen,
 		si_screen_clear_buffer(rscreen, &rtex->resource.b.b,
 					 rtex->dcc_offset,
 					 rtex->surface.dcc_size,
 					 0xFFFFFFFF);
 	}
 
 	/* Initialize the CMASK base register value. */
 	rtex->cmask.base_address_reg =
 		(rtex->resource.gpu_address + rtex->cmask.offset) >> 8;
 
-	if (rscreen->debug_flags & DBG_VM) {
+	if (rscreen->debug_flags & DBG(VM)) {
 		fprintf(stderr, "VM start=0x%"PRIX64"  end=0x%"PRIX64" | Texture %ix%ix%i, %i levels, %i samples, %s\n",
 			rtex->resource.gpu_address,
 			rtex->resource.gpu_address + rtex->resource.buf->size,
 			base->width0, base->height0, util_max_layer(base, 0)+1, base->last_level+1,
 			base->nr_samples ? base->nr_samples : 1, util_format_short_name(base->format));
 	}
 
-	if (rscreen->debug_flags & DBG_TEX) {
+	if (rscreen->debug_flags & DBG(TEX)) {
 		puts("Texture:");
 		struct u_log_context log;
 		u_log_context_init(&log);
 		si_print_texture_info(rscreen, rtex, &log);
 		u_log_new_page_print(&log, stdout);
 		fflush(stdout);
 		u_log_context_destroy(&log);
 	}
 
 	return rtex;
@@ -1309,21 +1309,21 @@ r600_choose_tiling(struct r600_common_screen *rscreen,
 	    is_depth_stencil &&
 	    (templ->flags & PIPE_RESOURCE_FLAG_TEXTURING_MORE_LIKELY))
 		return RADEON_SURF_MODE_2D;
 
 	/* Handle common candidates for the linear mode.
 	 * Compressed textures and DB surfaces must always be tiled.
 	 */
 	if (!force_tiling &&
 	    !is_depth_stencil &&
 	    !util_format_is_compressed(templ->format)) {
-		if (rscreen->debug_flags & DBG_NO_TILING)
+		if (rscreen->debug_flags & DBG(NO_TILING))
 			return RADEON_SURF_MODE_LINEAR_ALIGNED;
 
 		/* Tiling doesn't work with the 422 (SUBSAMPLED) formats on R600+. */
 		if (desc->layout == UTIL_FORMAT_LAYOUT_SUBSAMPLED)
 			return RADEON_SURF_MODE_LINEAR_ALIGNED;
 
 		/* Cursors are linear on SI.
 		 * (XXX double-check, maybe also use RADEON_SURF_SCANOUT) */
 		if (templ->bind & PIPE_BIND_CURSOR)
 			return RADEON_SURF_MODE_LINEAR_ALIGNED;
@@ -1340,37 +1340,37 @@ r600_choose_tiling(struct r600_common_screen *rscreen,
 			return RADEON_SURF_MODE_LINEAR_ALIGNED;
 
 		/* Textures likely to be mapped often. */
 		if (templ->usage == PIPE_USAGE_STAGING ||
 		    templ->usage == PIPE_USAGE_STREAM)
 			return RADEON_SURF_MODE_LINEAR_ALIGNED;
 	}
 
 	/* Make small textures 1D tiled. */
 	if (templ->width0 <= 16 || templ->height0 <= 16 ||
-	    (rscreen->debug_flags & DBG_NO_2D_TILING))
+	    (rscreen->debug_flags & DBG(NO_2D_TILING)))
 		return RADEON_SURF_MODE_1D;
 
 	/* The allocator will switch to 1D if needed. */
 	return RADEON_SURF_MODE_2D;
 }
 
 struct pipe_resource *si_texture_create(struct pipe_screen *screen,
 					const struct pipe_resource *templ)
 {
 	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
 	struct radeon_surf surface = {0};
 	bool is_flushed_depth = templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH;
 	bool tc_compatible_htile =
 		rscreen->chip_class >= VI &&
 		(templ->flags & PIPE_RESOURCE_FLAG_TEXTURING_MORE_LIKELY) &&
-		!(rscreen->debug_flags & DBG_NO_HYPERZ) &&
+		!(rscreen->debug_flags & DBG(NO_HYPERZ)) &&
 		!is_flushed_depth &&
 		templ->nr_samples <= 1 && /* TC-compat HTILE is less efficient with MSAA */
 		util_format_is_depth_or_stencil(templ->format);
 
 	int r;
 
 	r = r600_init_surface(rscreen, &surface, templ,
 			      r600_choose_tiling(rscreen, templ), 0, 0,
 			      false, false, is_flushed_depth,
 			      tc_compatible_htile);
@@ -2679,39 +2679,39 @@ void si_do_fast_color_clear(struct r600_common_context *rctx,
 		    tex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_1D &&
 		    rctx->screen->info.drm_major == 2 &&
 		    rctx->screen->info.drm_minor < 38) {
 			continue;
 		}
 
 		/* Fast clear is the most appropriate place to enable DCC for
 		 * displayable surfaces.
 		 */
 		if (rctx->chip_class >= VI &&
-		    !(rctx->screen->debug_flags & DBG_NO_DCC_FB)) {
+		    !(rctx->screen->debug_flags & DBG(NO_DCC_FB))) {
 			vi_separate_dcc_try_enable(rctx, tex);
 
 			/* RB+ isn't supported with a CMASK clear only on Stoney,
 			 * so all clears are considered to be hypothetically slow
 			 * clears, which is weighed when determining whether to
 			 * enable separate DCC.
 			 */
 			if (tex->dcc_gather_statistics &&
 			    rctx->family == CHIP_STONEY)
 				tex->num_slow_clears++;
 		}
 
 		/* Try to clear DCC first, otherwise try CMASK. */
 		if (vi_dcc_enabled(tex, 0)) {
 			uint32_t reset_value;
 			bool clear_words_needed;
 
-			if (rctx->screen->debug_flags & DBG_NO_DCC_CLEAR)
+			if (rctx->screen->debug_flags & DBG(NO_DCC_CLEAR))
 				continue;
 
 			if (!vi_get_fast_clear_parameters(fb->cbufs[i]->format,
 							  color, &reset_value,
 							  &clear_words_needed))
 				continue;
 
 			vi_dcc_clear_level(rctx, tex, 0, reset_value);
 
 			unsigned level_bit = 1 << fb->cbufs[i]->u.tex.level;
diff --git a/src/gallium/drivers/radeonsi/si_blit.c b/src/gallium/drivers/radeonsi/si_blit.c
index 4b7cca6..72ee0f0 100644
--- a/src/gallium/drivers/radeonsi/si_blit.c
+++ b/src/gallium/drivers/radeonsi/si_blit.c
@@ -1389,21 +1389,21 @@ static void si_blit(struct pipe_context *ctx,
 					      info->src.level,
 					      info->src.format);
 	vi_disable_dcc_if_incompatible_format(&sctx->b, info->dst.resource,
 					      info->dst.level,
 					      info->dst.format);
 	si_decompress_subresource(ctx, info->src.resource, info->mask,
 				  info->src.level,
 				  info->src.box.z,
 				  info->src.box.z + info->src.box.depth - 1);
 
-	if (sctx->screen->b.debug_flags & DBG_FORCE_DMA &&
+	if (sctx->screen->b.debug_flags & DBG(FORCE_DMA) &&
 	    util_try_blit_via_copy_region(ctx, info))
 		return;
 
 	si_blitter_begin(ctx, SI_BLIT |
 			 (info->render_condition_enable ? 0 : SI_DISABLE_RENDER_COND));
 	util_blitter_blit(sctx->blitter, info);
 	si_blitter_end(ctx);
 }
 
 static boolean si_generate_mipmap(struct pipe_context *ctx,
diff --git a/src/gallium/drivers/radeonsi/si_debug.c b/src/gallium/drivers/radeonsi/si_debug.c
index ddf65d6..1aca98b 100644
--- a/src/gallium/drivers/radeonsi/si_debug.c
+++ b/src/gallium/drivers/radeonsi/si_debug.c
@@ -1096,14 +1096,14 @@ void si_check_vm_faults(struct r600_common_context *ctx,
 }
 
 void si_init_debug_functions(struct si_context *sctx)
 {
 	sctx->b.b.dump_debug_state = si_dump_debug_state;
 	sctx->b.check_vm_faults = si_check_vm_faults;
 
 	/* Set the initial dmesg timestamp for this context, so that
 	 * only new messages will be checked for VM faults.
 	 */
-	if (sctx->screen->b.debug_flags & DBG_CHECK_VM)
+	if (sctx->screen->b.debug_flags & DBG(CHECK_VM))
 		ac_vm_fault_occured(sctx->b.chip_class,
 				    &sctx->dmesg_timestamp, NULL);
 }
diff --git a/src/gallium/drivers/radeonsi/si_hw_context.c b/src/gallium/drivers/radeonsi/si_hw_context.c
index ef03a6d..72da54e 100644
--- a/src/gallium/drivers/radeonsi/si_hw_context.c
+++ b/src/gallium/drivers/radeonsi/si_hw_context.c
@@ -76,21 +76,21 @@ void si_context_gfx_flush(void *context, unsigned flags,
 
 	if (ctx->gfx_flush_in_progress)
 		return;
 
 	if (!radeon_emitted(cs, ctx->b.initial_gfx_cs_size))
 		return;
 
 	if (si_check_device_reset(&ctx->b))
 		return;
 
-	if (ctx->screen->b.debug_flags & DBG_CHECK_VM)
+	if (ctx->screen->b.debug_flags & DBG(CHECK_VM))
 		flags &= ~RADEON_FLUSH_ASYNC;
 
 	/* If the state tracker is flushing the GFX IB, r600_flush_from_st is
 	 * responsible for flushing the DMA IB and merging the fences from both.
 	 * This code is only needed when the driver flushes the GFX IB
 	 * internally, and it never asks for a fence handle.
 	 */
 	if (radeon_emitted(ctx->b.dma.cs, 0)) {
 		assert(fence == NULL); /* internal flushes only */
 		ctx->b.dma.flush(ctx, flags, NULL);
@@ -119,21 +119,21 @@ void si_context_gfx_flush(void *context, unsigned flags,
 		ctx->current_saved_cs->flushed = true;
 	}
 
 	/* Flush the CS. */
 	ws->cs_flush(cs, flags, &ctx->b.last_gfx_fence);
 	if (fence)
 		ws->fence_reference(fence, ctx->b.last_gfx_fence);
 	ctx->b.num_gfx_cs_flushes++;
 
 	/* Check VM faults if needed. */
-	if (ctx->screen->b.debug_flags & DBG_CHECK_VM) {
+	if (ctx->screen->b.debug_flags & DBG(CHECK_VM)) {
 		/* Use conservative timeout 800ms, after which we won't wait any
 		 * longer and assume the GPU is hung.
 		 */
 		ctx->b.ws->fence_wait(ctx->b.ws, ctx->b.last_gfx_fence, 800*1000*1000);
 
 		si_check_vm_faults(&ctx->b, &ctx->current_saved_cs->gfx, RING_GFX);
 	}
 
 	if (ctx->current_saved_cs)
 		si_saved_cs_reference(&ctx->current_saved_cs, NULL);
diff --git a/src/gallium/drivers/radeonsi/si_pipe.c b/src/gallium/drivers/radeonsi/si_pipe.c
index 5e20a0b..da5a97b 100644
--- a/src/gallium/drivers/radeonsi/si_pipe.c
+++ b/src/gallium/drivers/radeonsi/si_pipe.c
@@ -135,21 +135,21 @@ static void si_emit_string_marker(struct pipe_context *ctx,
 static LLVMTargetMachineRef
 si_create_llvm_target_machine(struct si_screen *sscreen)
 {
 	const char *triple = "amdgcn--";
 	char features[256];
 
 	snprintf(features, sizeof(features),
 		 "+DumpCode,+vgpr-spilling,-fp32-denormals,+fp64-denormals%s%s%s",
 		 sscreen->b.chip_class >= GFX9 ? ",+xnack" : ",-xnack",
 		 sscreen->llvm_has_working_vgpr_indexing ? "" : ",-promote-alloca",
-		 sscreen->b.debug_flags & DBG_SI_SCHED ? ",+si-scheduler" : "");
+		 sscreen->b.debug_flags & DBG(SI_SCHED) ? ",+si-scheduler" : "");
 
 	return LLVMCreateTargetMachine(ac_get_llvm_target(triple), triple,
 				       si_get_llvm_processor_name(sscreen->b.family),
 				       features,
 				       LLVMCodeGenLevelDefault,
 				       LLVMRelocDefault,
 				       LLVMCodeModelDefault);
 }
 
 static void si_set_log_context(struct pipe_context *ctx,
@@ -231,21 +231,21 @@ static struct pipe_context *si_create_context(struct pipe_screen *screen,
 	si_init_state_functions(sctx);
 	si_init_shader_functions(sctx);
 	si_init_viewport_functions(sctx);
 	si_init_ia_multi_vgt_param_table(sctx);
 
 	if (sctx->b.chip_class >= CIK)
 		cik_init_sdma_functions(sctx);
 	else
 		si_init_dma_functions(sctx);
 
-	if (sscreen->b.debug_flags & DBG_FORCE_DMA)
+	if (sscreen->b.debug_flags & DBG(FORCE_DMA))
 		sctx->b.b.resource_copy_region = sctx->b.dma_copy;
 
 	sctx->blitter = util_blitter_create(&sctx->b.b);
 	if (sctx->blitter == NULL)
 		goto fail;
 	sctx->blitter->draw_rectangle = si_draw_rectangle;
 
 	sctx->sample_mask.sample_mask = 0xffff;
 
 	/* these must be last */
@@ -344,21 +344,21 @@ fail:
 	si_destroy_context(&sctx->b.b);
 	return NULL;
 }
 
 static struct pipe_context *si_pipe_create_context(struct pipe_screen *screen,
 						   void *priv, unsigned flags)
 {
 	struct si_screen *sscreen = (struct si_screen *)screen;
 	struct pipe_context *ctx;
 
-	if (sscreen->b.debug_flags & DBG_CHECK_VM)
+	if (sscreen->b.debug_flags & DBG(CHECK_VM))
 		flags |= PIPE_CONTEXT_DEBUG;
 
 	ctx = si_create_context(screen, flags);
 
 	if (!(flags & PIPE_CONTEXT_PREFER_THREADED))
 		return ctx;
 
 	/* Clover (compute-only) is unsupported.
 	 *
 	 * Since the threaded context creates shader states from the non-driver
@@ -522,21 +522,21 @@ static int si_get_param(struct pipe_screen* pscreen, enum pipe_cap param)
 
 	case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
 	case PIPE_CAP_TEXTURE_BUFFER_OFFSET_ALIGNMENT:
 	case PIPE_CAP_MAX_TEXTURE_GATHER_COMPONENTS:
 	case PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS:
 	case PIPE_CAP_MAX_VERTEX_STREAMS:
 	case PIPE_CAP_SHADER_BUFFER_OFFSET_ALIGNMENT:
 		return 4;
 
 	case PIPE_CAP_GLSL_FEATURE_LEVEL:
-		if (sscreen->b.debug_flags & DBG_NIR)
+		if (sscreen->b.debug_flags & DBG(NIR))
 			return 140; /* no geometry and tessellation shaders yet */
 		if (si_have_tgsi_compute(sscreen))
 			return 450;
 		return 420;
 
 	case PIPE_CAP_MAX_TEXTURE_BUFFER_SIZE:
 		return MIN2(sscreen->b.info.max_alloc_size, INT_MAX);
 
 	case PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY:
 	case PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY:
@@ -721,21 +721,21 @@ static int si_get_shader_param(struct pipe_screen* pscreen,
 	case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
 	case PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS:
 		return SI_NUM_SAMPLERS;
 	case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS:
 		return SI_NUM_SHADER_BUFFERS;
 	case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
 		return SI_NUM_IMAGES;
 	case PIPE_SHADER_CAP_MAX_UNROLL_ITERATIONS_HINT:
 		return 32;
 	case PIPE_SHADER_CAP_PREFERRED_IR:
-		if (sscreen->b.debug_flags & DBG_NIR &&
+		if (sscreen->b.debug_flags & DBG(NIR) &&
 		    (shader == PIPE_SHADER_VERTEX ||
 		     shader == PIPE_SHADER_FRAGMENT))
 			return PIPE_SHADER_IR_NIR;
 		return PIPE_SHADER_IR_TGSI;
 	case PIPE_SHADER_CAP_LOWER_IF_THRESHOLD:
 		return 4;
 
 	/* Supported boolean features. */
 	case PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED:
 	case PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED:
@@ -916,31 +916,31 @@ static void si_test_vmfault(struct si_screen *sscreen)
 	struct pipe_resource *buf =
 		pipe_buffer_create(&sscreen->b.b, 0, PIPE_USAGE_DEFAULT, 64);
 
 	if (!buf) {
 		puts("Buffer allocation failed.");
 		exit(1);
 	}
 
 	r600_resource(buf)->gpu_address = 0; /* cause a VM fault */
 
-	if (sscreen->b.debug_flags & DBG_TEST_VMFAULT_CP) {
+	if (sscreen->b.debug_flags & DBG(TEST_VMFAULT_CP)) {
 		si_copy_buffer(sctx, buf, buf, 0, 4, 4, 0);
 		ctx->flush(ctx, NULL, 0);
 		puts("VM fault test: CP - done.");
 	}
-	if (sscreen->b.debug_flags & DBG_TEST_VMFAULT_SDMA) {
+	if (sscreen->b.debug_flags & DBG(TEST_VMFAULT_SDMA)) {
 		sctx->b.dma_clear_buffer(ctx, buf, 0, 4, 0);
 		ctx->flush(ctx, NULL, 0);
 		puts("VM fault test: SDMA - done.");
 	}
-	if (sscreen->b.debug_flags & DBG_TEST_VMFAULT_SHADER) {
+	if (sscreen->b.debug_flags & DBG(TEST_VMFAULT_SHADER)) {
 		util_test_constant_buffer(ctx, buf);
 		puts("VM fault test: Shader - done.");
 	}
 	exit(0);
 }
 
 static void radeonsi_get_driver_uuid(struct pipe_screen *pscreen, char *uuid)
 {
 	ac_compute_driver_uuid(uuid, PIPE_UUID_SIZE);
 }
@@ -972,23 +972,23 @@ struct pipe_screen *radeonsi_screen_create(struct radeon_winsys *ws,
 	sscreen->b.b.get_driver_uuid = radeonsi_get_driver_uuid;
 	sscreen->b.b.resource_create = si_resource_create_common;
 
 	si_init_screen_state_functions(sscreen);
 
 	/* Set these flags in debug_flags early, so that the shader cache takes
 	 * them into account.
 	 */
 	if (driQueryOptionb(config->options,
 			    "glsl_correct_derivatives_after_discard"))
-		sscreen->b.debug_flags |= DBG_FS_CORRECT_DERIVS_AFTER_KILL;
+		sscreen->b.debug_flags |= DBG(FS_CORRECT_DERIVS_AFTER_KILL);
 	if (driQueryOptionb(config->options, "radeonsi_enable_sisched"))
-		sscreen->b.debug_flags |= DBG_SI_SCHED;
+		sscreen->b.debug_flags |= DBG(SI_SCHED);
 
 	if (!si_common_screen_init(&sscreen->b, ws) ||
 	    !si_init_gs_info(sscreen) ||
 	    !si_init_shader_cache(sscreen)) {
 		FREE(sscreen);
 		return NULL;
 	}
 
 	/* Only enable as many threads as we have target machines, but at most
 	 * the number of CPUs - 1 if there is more than one.
@@ -1043,61 +1043,61 @@ struct pipe_screen *radeonsi_screen_create(struct radeon_winsys *ws,
 		 sscreen->b.info.me_fw_version >= 87) ||
 		(sscreen->b.chip_class == CIK &&
 		 sscreen->b.info.pfp_fw_version >= 211 &&
 		 sscreen->b.info.me_fw_version >= 173) ||
 		(sscreen->b.chip_class == SI &&
 		 sscreen->b.info.pfp_fw_version >= 79 &&
 		 sscreen->b.info.me_fw_version >= 142);
 
 	sscreen->has_out_of_order_rast = sscreen->b.chip_class >= VI &&
 					 sscreen->b.info.max_se >= 2 &&
-					 !(sscreen->b.debug_flags & DBG_NO_OUT_OF_ORDER);
+					 !(sscreen->b.debug_flags & DBG(NO_OUT_OF_ORDER));
 	sscreen->assume_no_z_fights =
 		driQueryOptionb(config->options, "radeonsi_assume_no_z_fights");
 	sscreen->commutative_blend_add =
 		driQueryOptionb(config->options, "radeonsi_commutative_blend_add");
 	sscreen->clear_db_meta_before_clear =
 		driQueryOptionb(config->options, "radeonsi_clear_db_meta_before_clear");
 	sscreen->has_msaa_sample_loc_bug = (sscreen->b.family >= CHIP_POLARIS10 &&
 					    sscreen->b.family <= CHIP_POLARIS12) ||
 					   sscreen->b.family == CHIP_VEGA10 ||
 					   sscreen->b.family == CHIP_RAVEN;
 	sscreen->dpbb_allowed = sscreen->b.chip_class >= GFX9 &&
-				!(sscreen->b.debug_flags & DBG_NO_DPBB);
+				!(sscreen->b.debug_flags & DBG(NO_DPBB));
 	sscreen->dfsm_allowed = sscreen->dpbb_allowed &&
-				!(sscreen->b.debug_flags & DBG_NO_DFSM);
+				!(sscreen->b.debug_flags & DBG(NO_DFSM));
 
 	/* While it would be nice not to have this flag, we are constrained
 	 * by the reality that LLVM 5.0 doesn't have working VGPR indexing
 	 * on GFX9.
 	 */
 	sscreen->llvm_has_working_vgpr_indexing = sscreen->b.chip_class <= VI;
 
 	sscreen->b.has_cp_dma = true;
 	sscreen->b.has_streamout = true;
 
 	/* Some chips have RB+ registers, but don't support RB+. Those must
 	 * always disable it.
 	 */
 	if (sscreen->b.family == CHIP_STONEY ||
 	    sscreen->b.chip_class >= GFX9) {
 		sscreen->b.has_rbplus = true;
 
 		sscreen->b.rbplus_allowed =
-			!(sscreen->b.debug_flags & DBG_NO_RB_PLUS) &&
+			!(sscreen->b.debug_flags & DBG(NO_RB_PLUS)) &&
 			(sscreen->b.family == CHIP_STONEY ||
 			 sscreen->b.family == CHIP_RAVEN);
 	}
 
 	(void) mtx_init(&sscreen->shader_parts_mutex, mtx_plain);
 	sscreen->use_monolithic_shaders =
-		(sscreen->b.debug_flags & DBG_MONOLITHIC_SHADERS) != 0;
+		(sscreen->b.debug_flags & DBG(MONOLITHIC_SHADERS)) != 0;
 
 	sscreen->b.barrier_flags.cp_to_L2 = SI_CONTEXT_INV_SMEM_L1 |
 					    SI_CONTEXT_INV_VMEM_L1;
 	if (sscreen->b.chip_class <= VI) {
 		sscreen->b.barrier_flags.cp_to_L2 |= SI_CONTEXT_INV_GLOBAL_L2;
 		sscreen->b.barrier_flags.L2_to_cp |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
 	}
 
 	sscreen->b.barrier_flags.compute_to_L2 = SI_CONTEXT_CS_PARTIAL_FLUSH;
 
@@ -1105,20 +1105,20 @@ struct pipe_screen *radeonsi_screen_create(struct radeon_winsys *ws,
 		sscreen->b.debug_flags |= DBG_ALL_SHADERS;
 
 	for (i = 0; i < num_compiler_threads; i++)
 		sscreen->tm[i] = si_create_llvm_target_machine(sscreen);
 	for (i = 0; i < num_compiler_threads_lowprio; i++)
 		sscreen->tm_low_priority[i] = si_create_llvm_target_machine(sscreen);
 
 	/* Create the auxiliary context. This must be done last. */
 	sscreen->b.aux_context = si_create_context(&sscreen->b.b, 0);
 
-	if (sscreen->b.debug_flags & DBG_TEST_DMA)
+	if (sscreen->b.debug_flags & DBG(TEST_DMA))
 		si_test_dma(&sscreen->b);
 
-	if (sscreen->b.debug_flags & (DBG_TEST_VMFAULT_CP |
-				      DBG_TEST_VMFAULT_SDMA |
-				      DBG_TEST_VMFAULT_SHADER))
+	if (sscreen->b.debug_flags & (DBG(TEST_VMFAULT_CP) |
+				      DBG(TEST_VMFAULT_SDMA) |
+				      DBG(TEST_VMFAULT_SHADER)))
 		si_test_vmfault(sscreen);
 
 	return &sscreen->b.b;
 }
diff --git a/src/gallium/drivers/radeonsi/si_shader.c b/src/gallium/drivers/radeonsi/si_shader.c
index c0037fe..0d6f379 100644
--- a/src/gallium/drivers/radeonsi/si_shader.c
+++ b/src/gallium/drivers/radeonsi/si_shader.c
@@ -4079,21 +4079,21 @@ static void si_create_function(struct si_shader_context *ctx,
 	}
 
 	if (max_workgroup_size) {
 		si_llvm_add_attribute(ctx->main_fn, "amdgpu-max-work-group-size",
 				      max_workgroup_size);
 	}
 	LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
 					   "no-signed-zeros-fp-math",
 					   "true");
 
-	if (ctx->screen->b.debug_flags & DBG_UNSAFE_MATH) {
+	if (ctx->screen->b.debug_flags & DBG(UNSAFE_MATH)) {
 		/* These were copied from some LLVM test. */
 		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
 						   "less-precise-fpmad",
 						   "true");
 		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
 						   "no-infs-fp-math",
 						   "true");
 		LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
 						   "no-nans-fp-math",
 						   "true");
@@ -5112,21 +5112,21 @@ void si_shader_dump(struct si_screen *sscreen, const struct si_shader *shader,
 			fprintf(file, "%s\n", shader->previous_stage->binary.llvm_ir_string);
 		}
 
 		fprintf(file, "\n%s - main shader part - LLVM IR:\n\n",
 			si_get_shader_name(shader, processor));
 		fprintf(file, "%s\n", shader->binary.llvm_ir_string);
 	}
 
 	if (!check_debug_option ||
 	    (si_can_dump_shader(&sscreen->b, processor) &&
-	     !(sscreen->b.debug_flags & DBG_NO_ASM))) {
+	     !(sscreen->b.debug_flags & DBG(NO_ASM)))) {
 		fprintf(file, "\n%s:\n", si_get_shader_name(shader, processor));
 
 		if (shader->prolog)
 			si_shader_dump_disassembly(&shader->prolog->binary,
 						   debug, "prolog", file);
 		if (shader->previous_stage)
 			si_shader_dump_disassembly(&shader->previous_stage->binary,
 						   debug, "previous stage", file);
 		if (shader->prolog2)
 			si_shader_dump_disassembly(&shader->prolog2->binary,
@@ -5152,21 +5152,21 @@ static int si_compile_llvm(struct si_screen *sscreen,
 			   struct pipe_debug_callback *debug,
 			   unsigned processor,
 			   const char *name)
 {
 	int r = 0;
 	unsigned count = p_atomic_inc_return(&sscreen->b.num_compilations);
 
 	if (si_can_dump_shader(&sscreen->b, processor)) {
 		fprintf(stderr, "radeonsi: Compiling shader %d\n", count);
 
-		if (!(sscreen->b.debug_flags & (DBG_NO_IR | DBG_PREOPT_IR))) {
+		if (!(sscreen->b.debug_flags & (DBG(NO_IR) | DBG(PREOPT_IR)))) {
 			fprintf(stderr, "%s LLVM IR:\n\n", name);
 			ac_dump_module(mod);
 			fprintf(stderr, "\n");
 		}
 	}
 
 	if (sscreen->record_llvm_ir) {
 		char *ir = LLVMPrintModuleToString(mod);
 		binary->llvm_ir_string = strdup(ir);
 		LLVMDisposeMessage(ir);
@@ -5686,21 +5686,21 @@ static bool si_compile_tgsi_main(struct si_shader_context *ctx,
 	if (ctx->type == PIPE_SHADER_GEOMETRY) {
 		int i;
 		for (i = 0; i < 4; i++) {
 			ctx->gs_next_vertex[i] =
 				lp_build_alloca(&ctx->gallivm,
 						ctx->i32, "");
 		}
 	}
 
 	if (ctx->type == PIPE_SHADER_FRAGMENT && sel->info.uses_kill &&
-	    ctx->screen->b.debug_flags & DBG_FS_CORRECT_DERIVS_AFTER_KILL) {
+	    ctx->screen->b.debug_flags & DBG(FS_CORRECT_DERIVS_AFTER_KILL)) {
 		/* This is initialized to 0.0 = not kill. */
 		ctx->postponed_kill = lp_build_alloca(&ctx->gallivm, ctx->f32, "");
 	}
 
 	if (sel->tokens) {
 		if (!lp_build_tgsi_llvm(bld_base, sel->tokens)) {
 			fprintf(stderr, "Failed to translate shader from TGSI to LLVM\n");
 			return false;
 		}
 	} else {
@@ -6261,21 +6261,21 @@ int si_compile_tgsi_shader(struct si_screen *sscreen,
 			   bool is_monolithic,
 			   struct pipe_debug_callback *debug)
 {
 	struct si_shader_selector *sel = shader->selector;
 	struct si_shader_context ctx;
 	int r = -1;
 
 	/* Dump TGSI code before doing TGSI->LLVM conversion in case the
 	 * conversion fails. */
 	if (si_can_dump_shader(&sscreen->b, sel->info.processor) &&
-	    !(sscreen->b.debug_flags & DBG_NO_TGSI)) {
+	    !(sscreen->b.debug_flags & DBG(NO_TGSI))) {
 		if (sel->tokens)
 			tgsi_dump(sel->tokens, 0);
 		else
 			nir_print_shader(sel->nir, stderr);
 		si_dump_streamout(&sel->so);
 	}
 
 	si_init_shader_ctx(&ctx, sscreen, tm);
 	si_llvm_context_set_tgsi(&ctx, shader);
 	ctx.separate_prolog = !is_monolithic;
diff --git a/src/gallium/drivers/radeonsi/si_shader_tgsi_setup.c b/src/gallium/drivers/radeonsi/si_shader_tgsi_setup.c
index 2aedf85..9ec5a87 100644
--- a/src/gallium/drivers/radeonsi/si_shader_tgsi_setup.c
+++ b/src/gallium/drivers/radeonsi/si_shader_tgsi_setup.c
@@ -1149,21 +1149,21 @@ void si_llvm_context_init(struct si_shader_context *ctx,
 	ctx->gallivm.module = LLVMModuleCreateWithNameInContext("tgsi",
 						ctx->gallivm.context);
 	LLVMSetTarget(ctx->gallivm.module, "amdgcn--");
 
 	LLVMTargetDataRef data_layout = LLVMCreateTargetDataLayout(tm);
 	char *data_layout_str = LLVMCopyStringRepOfTargetData(data_layout);
 	LLVMSetDataLayout(ctx->gallivm.module, data_layout_str);
 	LLVMDisposeTargetData(data_layout);
 	LLVMDisposeMessage(data_layout_str);
 
-	bool unsafe_fpmath = (sscreen->b.debug_flags & DBG_UNSAFE_MATH) != 0;
+	bool unsafe_fpmath = (sscreen->b.debug_flags & DBG(UNSAFE_MATH)) != 0;
 	enum lp_float_mode float_mode =
 		unsafe_fpmath ? LP_FLOAT_MODE_UNSAFE_FP_MATH :
 				LP_FLOAT_MODE_NO_SIGNED_ZEROS_FP_MATH;
 
 	ctx->gallivm.builder = lp_create_builder(ctx->gallivm.context,
 						 float_mode);
 
 	ac_llvm_context_init(&ctx->ac, ctx->gallivm.context, sscreen->b.chip_class);
 	ctx->ac.module = ctx->gallivm.module;
 	ctx->ac.builder = ctx->gallivm.builder;
@@ -1351,21 +1351,21 @@ void si_llvm_create_func(struct si_shader_context *ctx,
 	LLVMSetFunctionCallConv(ctx->main_fn, call_conv);
 }
 
 void si_llvm_optimize_module(struct si_shader_context *ctx)
 {
 	struct gallivm_state *gallivm = &ctx->gallivm;
 	const char *triple = LLVMGetTarget(gallivm->module);
 	LLVMTargetLibraryInfoRef target_library_info;
 
 	/* Dump LLVM IR before any optimization passes */
-	if (ctx->screen->b.debug_flags & DBG_PREOPT_IR &&
+	if (ctx->screen->b.debug_flags & DBG(PREOPT_IR) &&
 	    si_can_dump_shader(&ctx->screen->b, ctx->type))
 		LLVMDumpModule(ctx->gallivm.module);
 
 	/* Create the pass manager */
 	gallivm->passmgr = LLVMCreatePassManager();
 
 	target_library_info = gallivm_create_target_library_info(triple);
 	LLVMAddTargetLibraryInfo(target_library_info, gallivm->passmgr);
 
 	if (si_extra_shader_checks(&ctx->screen->b, ctx->type))
diff --git a/src/gallium/drivers/radeonsi/si_state_draw.c b/src/gallium/drivers/radeonsi/si_state_draw.c
index 7fca9a1..3980144 100644
--- a/src/gallium/drivers/radeonsi/si_state_draw.c
+++ b/src/gallium/drivers/radeonsi/si_state_draw.c
@@ -361,21 +361,21 @@ si_get_init_multi_vgt_param(struct si_screen *sscreen,
 				    sscreen->b.family == CHIP_POLARIS12)
 					partial_vs_wave = true;
 			} else {
 				partial_vs_wave = true;
 			}
 		}
 	}
 
 	/* This is a hardware requirement. */
 	if (key->u.line_stipple_enabled ||
-	    (sscreen->b.debug_flags & DBG_SWITCH_ON_EOP)) {
+	    (sscreen->b.debug_flags & DBG(SWITCH_ON_EOP))) {
 		ia_switch_on_eop = true;
 		wd_switch_on_eop = true;
 	}
 
 	if (sscreen->b.chip_class >= CIK) {
 		/* WD_SWITCH_ON_EOP has no effect on GPUs with less than
 		 * 4 shader engines. Set 1 to pass the assertion below.
 		 * The other cases are hardware requirements.
 		 *
 		 * Polaris supports primitive restart with WD_SWITCH_ON_EOP=0
diff --git a/src/gallium/drivers/radeonsi/si_state_shaders.c b/src/gallium/drivers/radeonsi/si_state_shaders.c
index 1fadc7e..bb5d329 100644
--- a/src/gallium/drivers/radeonsi/si_state_shaders.c
+++ b/src/gallium/drivers/radeonsi/si_state_shaders.c
@@ -1470,21 +1470,21 @@ static inline void si_shader_selector_key(struct pipe_context *ctx,
 			}
 		}
 
 		key->part.ps.epilog.alpha_func = si_get_alpha_test_func(sctx);
 		break;
 	}
 	default:
 		assert(0);
 	}
 
-	if (unlikely(sctx->screen->b.debug_flags & DBG_NO_OPT_VARIANT))
+	if (unlikely(sctx->screen->b.debug_flags & DBG(NO_OPT_VARIANT)))
 		memset(&key->opt, 0, sizeof(key->opt));
 }
 
 static void si_build_shader_variant(struct si_shader *shader,
 				    int thread_index,
 				    bool low_priority)
 {
 	struct si_shader_selector *sel = shader->selector;
 	struct si_screen *sscreen = sel->screen;
 	LLVMTargetMachineRef tm;
@@ -1900,21 +1900,21 @@ static void si_init_shader_selector_async(void *job, int thread_index)
 				case TGSI_SEMANTIC_PSIZE:
 				case TGSI_SEMANTIC_CLIPVERTEX:
 				case TGSI_SEMANTIC_EDGEFLAG:
 					break;
 				}
 			}
 		}
 	}
 
 	/* Pre-compilation. */
-	if (sscreen->b.debug_flags & DBG_PRECOMPILE &&
+	if (sscreen->b.debug_flags & DBG(PRECOMPILE) &&
 	    /* GFX9 needs LS or ES for compilation, which we don't have here. */
 	    (sscreen->b.chip_class <= VI ||
 	     (sel->type != PIPE_SHADER_TESS_CTRL &&
 	      sel->type != PIPE_SHADER_GEOMETRY))) {
 		struct si_shader_ctx_state state = {sel};
 		struct si_shader_key key;
 
 		memset(&key, 0, sizeof(key));
 		si_parse_next_shader_property(&sel->info,
 					      sel->so.num_outputs != 0,
-- 
2.7.4


