[Mesa-dev] [PATCH 12/13] gallium/radeon: add threaded context counter monitoring for HUD

Marek Olšák maraeo at gmail.com
Wed May 10 22:45:45 UTC 2017


From: Marek Olšák <marek.olsak at amd.com>

"tc" will be initialized by the next commit.
---
 src/gallium/drivers/radeon/r600_pipe_common.h |  1 +
 src/gallium/drivers/radeon/r600_query.c       | 21 +++++++++++++++++++++
 src/gallium/drivers/radeon/r600_query.h       |  3 +++
 3 files changed, 25 insertions(+)
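
Note for reviewers: the new software queries read the call counters that
u_threaded_context keeps per context. Below is a minimal sketch of the
fields this patch assumes on struct threaded_context -- the real
declaration lives in u_threaded_context.h and has many more members; only
the field names used by the query code are shown here.

	/* Sketch only: the threaded_context counters read by the new
	 * software queries. */
	struct threaded_context {
		unsigned num_direct_calls;    /* calls executed synchronously in the app thread */
		unsigned num_offloaded_calls; /* calls batched for execution in the worker thread */
		unsigned num_syncs;           /* times the app thread synchronized with the worker */
	};

Once "tc" is initialized, the counters can be shown on the HUD with e.g.
GALLIUM_HUD=tc-offloaded-calls,tc-direct-calls,tc-num-syncs.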

diff --git a/src/gallium/drivers/radeon/r600_pipe_common.h b/src/gallium/drivers/radeon/r600_pipe_common.h
index f9c9f11..b17b690 100644
--- a/src/gallium/drivers/radeon/r600_pipe_common.h
+++ b/src/gallium/drivers/radeon/r600_pipe_common.h
@@ -546,20 +546,21 @@ struct r600_common_context {
 	struct r600_ring		gfx;
 	struct r600_ring		dma;
 	struct pipe_fence_handle	*last_gfx_fence;
 	struct pipe_fence_handle	*last_sdma_fence;
 	unsigned			num_gfx_cs_flushes;
 	unsigned			initial_gfx_cs_size;
 	unsigned			gpu_reset_counter;
 	unsigned			last_dirty_tex_counter;
 	unsigned			last_compressed_colortex_counter;
 
+	struct threaded_context		*tc;
 	struct u_suballocator		*allocator_zeroed_memory;
 	struct slab_child_pool		pool_transfers;
 	struct slab_child_pool		pool_transfers_unsync; /* for threaded_context */
 
 	/* Current unaccounted memory usage. */
 	uint64_t			vram;
 	uint64_t			gtt;
 
 	/* States. */
 	struct r600_streamout		streamout;
diff --git a/src/gallium/drivers/radeon/r600_query.c b/src/gallium/drivers/radeon/r600_query.c
index dac9b9c..a80bbf5 100644
--- a/src/gallium/drivers/radeon/r600_query.c
+++ b/src/gallium/drivers/radeon/r600_query.c
@@ -126,20 +126,29 @@ static bool r600_query_sw_begin(struct r600_common_context *rctx,
 		break;
 	case R600_QUERY_NUM_FB_CACHE_FLUSHES:
 		query->begin_result = rctx->num_fb_cache_flushes;
 		break;
 	case R600_QUERY_NUM_L2_INVALIDATES:
 		query->begin_result = rctx->num_L2_invalidates;
 		break;
 	case R600_QUERY_NUM_L2_WRITEBACKS:
 		query->begin_result = rctx->num_L2_writebacks;
 		break;
+	case R600_QUERY_TC_OFFLOADED_CALLS:
+		query->begin_result = rctx->tc ? rctx->tc->num_offloaded_calls : 0;
+		break;
+	case R600_QUERY_TC_DIRECT_CALLS:
+		query->begin_result = rctx->tc ? rctx->tc->num_direct_calls : 0;
+		break;
+	case R600_QUERY_TC_NUM_SYNCS:
+		query->begin_result = rctx->tc ? rctx->tc->num_syncs : 0;
+		break;
 	case R600_QUERY_REQUESTED_VRAM:
 	case R600_QUERY_REQUESTED_GTT:
 	case R600_QUERY_MAPPED_VRAM:
 	case R600_QUERY_MAPPED_GTT:
 	case R600_QUERY_VRAM_USAGE:
 	case R600_QUERY_VRAM_VIS_USAGE:
 	case R600_QUERY_GTT_USAGE:
 	case R600_QUERY_GPU_TEMPERATURE:
 	case R600_QUERY_CURRENT_GPU_SCLK:
 	case R600_QUERY_CURRENT_GPU_MCLK:
@@ -253,20 +262,29 @@ static bool r600_query_sw_end(struct r600_common_context *rctx,
 		break;
 	case R600_QUERY_NUM_FB_CACHE_FLUSHES:
 		query->end_result = rctx->num_fb_cache_flushes;
 		break;
 	case R600_QUERY_NUM_L2_INVALIDATES:
 		query->end_result = rctx->num_L2_invalidates;
 		break;
 	case R600_QUERY_NUM_L2_WRITEBACKS:
 		query->end_result = rctx->num_L2_writebacks;
 		break;
+	case R600_QUERY_TC_OFFLOADED_CALLS:
+		query->end_result = rctx->tc ? rctx->tc->num_offloaded_calls : 0;
+		break;
+	case R600_QUERY_TC_DIRECT_CALLS:
+		query->end_result = rctx->tc ? rctx->tc->num_direct_calls : 0;
+		break;
+	case R600_QUERY_TC_NUM_SYNCS:
+		query->end_result = rctx->tc ? rctx->tc->num_syncs : 0;
+		break;
 	case R600_QUERY_REQUESTED_VRAM:
 	case R600_QUERY_REQUESTED_GTT:
 	case R600_QUERY_MAPPED_VRAM:
 	case R600_QUERY_MAPPED_GTT:
 	case R600_QUERY_VRAM_USAGE:
 	case R600_QUERY_VRAM_VIS_USAGE:
 	case R600_QUERY_GTT_USAGE:
 	case R600_QUERY_GPU_TEMPERATURE:
 	case R600_QUERY_CURRENT_GPU_SCLK:
 	case R600_QUERY_CURRENT_GPU_MCLK:
@@ -1778,20 +1796,23 @@ static struct pipe_driver_query_info r600_driver_query_list[] = {
 	X("compute-calls",		COMPUTE_CALLS,		UINT64, AVERAGE),
 	X("spill-compute-calls",	SPILL_COMPUTE_CALLS,	UINT64, AVERAGE),
 	X("dma-calls",			DMA_CALLS,		UINT64, AVERAGE),
 	X("cp-dma-calls",		CP_DMA_CALLS,		UINT64, AVERAGE),
 	X("num-vs-flushes",		NUM_VS_FLUSHES,		UINT64, AVERAGE),
 	X("num-ps-flushes",		NUM_PS_FLUSHES,		UINT64, AVERAGE),
 	X("num-cs-flushes",		NUM_CS_FLUSHES,		UINT64, AVERAGE),
 	X("num-fb-cache-flushes",	NUM_FB_CACHE_FLUSHES,	UINT64, AVERAGE),
 	X("num-L2-invalidates",		NUM_L2_INVALIDATES,	UINT64, AVERAGE),
 	X("num-L2-writebacks",		NUM_L2_WRITEBACKS,	UINT64, AVERAGE),
+	X("tc-offloaded-calls",		TC_OFFLOADED_CALLS,     UINT64, AVERAGE),
+	X("tc-direct-calls",		TC_DIRECT_CALLS,	UINT64, AVERAGE),
+	X("tc-num-syncs",		TC_NUM_SYNCS,		UINT64, AVERAGE),
 	X("CS-thread-busy",		CS_THREAD_BUSY,		UINT64, AVERAGE),
 	X("requested-VRAM",		REQUESTED_VRAM,		BYTES, AVERAGE),
 	X("requested-GTT",		REQUESTED_GTT,		BYTES, AVERAGE),
 	X("mapped-VRAM",		MAPPED_VRAM,		BYTES, AVERAGE),
 	X("mapped-GTT",			MAPPED_GTT,		BYTES, AVERAGE),
 	X("buffer-wait-time",		BUFFER_WAIT_TIME,	MICROSECONDS, CUMULATIVE),
 	X("num-mapped-buffers",		NUM_MAPPED_BUFFERS,	UINT64, AVERAGE),
 	X("num-GFX-IBs",		NUM_GFX_IBS,		UINT64, AVERAGE),
 	X("num-SDMA-IBs",		NUM_SDMA_IBS,		UINT64, AVERAGE),
 	X("num-bytes-moved",		NUM_BYTES_MOVED,	BYTES, CUMULATIVE),
diff --git a/src/gallium/drivers/radeon/r600_query.h b/src/gallium/drivers/radeon/r600_query.h
index 485bb61..a0257bc 100644
--- a/src/gallium/drivers/radeon/r600_query.h
+++ b/src/gallium/drivers/radeon/r600_query.h
@@ -47,20 +47,23 @@ enum {
 	R600_QUERY_COMPUTE_CALLS,
 	R600_QUERY_SPILL_COMPUTE_CALLS,
 	R600_QUERY_DMA_CALLS,
 	R600_QUERY_CP_DMA_CALLS,
 	R600_QUERY_NUM_VS_FLUSHES,
 	R600_QUERY_NUM_PS_FLUSHES,
 	R600_QUERY_NUM_CS_FLUSHES,
 	R600_QUERY_NUM_FB_CACHE_FLUSHES,
 	R600_QUERY_NUM_L2_INVALIDATES,
 	R600_QUERY_NUM_L2_WRITEBACKS,
+	R600_QUERY_TC_OFFLOADED_CALLS,
+	R600_QUERY_TC_DIRECT_CALLS,
+	R600_QUERY_TC_NUM_SYNCS,
 	R600_QUERY_CS_THREAD_BUSY,
 	R600_QUERY_REQUESTED_VRAM,
 	R600_QUERY_REQUESTED_GTT,
 	R600_QUERY_MAPPED_VRAM,
 	R600_QUERY_MAPPED_GTT,
 	R600_QUERY_BUFFER_WAIT_TIME,
 	R600_QUERY_NUM_MAPPED_BUFFERS,
 	R600_QUERY_NUM_GFX_IBS,
 	R600_QUERY_NUM_SDMA_IBS,
 	R600_QUERY_NUM_BYTES_MOVED,
-- 
2.7.4


