[Mesa-dev] [PATCH 1/3] gallium/radeon: rename grbm to mmio in the gpu load path

Samuel Pitoiset samuel.pitoiset at gmail.com
Thu Jan 26 19:54:45 UTC 2017


We also want to monitor other MMIO registers, such as SRBM_STATUS2, in
order to know whether SDMA is busy.

Signed-off-by: Samuel Pitoiset <samuel.pitoiset at gmail.com>
---
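
Note: as context for the rename, here is a minimal, self-contained sketch of
the busy/idle sampling scheme this file uses, applied to a second status
register the way a follow-up could do for SDMA. The read_status_reg() helper
and the SDMA_BUSY_BIT mask below are placeholders standing in for the winsys
register read and the real SRBM_STATUS2 bit layout; they are not the actual
driver code.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct mmio_counter {
	atomic_uint busy;
	atomic_uint idle;
};

/* Placeholder for the SDMA busy bit in SRBM_STATUS2. */
#define SDMA_BUSY_BIT	(1u << 5)

/* Stand-in for the winsys read_registers() call; returns a fake value. */
static uint32_t read_status_reg(void)
{
	return SDMA_BUSY_BIT;
}

/* One sample: bump either the busy or the idle counter. */
static void update_counter(struct mmio_counter *c, uint32_t value,
			   uint32_t mask)
{
	if (value & mask)
		atomic_fetch_add(&c->busy, 1);
	else
		atomic_fetch_add(&c->idle, 1);
}

int main(void)
{
	struct mmio_counter sdma = { 0 };

	/* The real driver samples from a background thread; loop here. */
	for (int i = 0; i < 100; i++)
		update_counter(&sdma, read_status_reg(), SDMA_BUSY_BIT);

	unsigned busy = atomic_load(&sdma.busy);
	unsigned idle = atomic_load(&sdma.idle);
	unsigned total = busy + idle;

	printf("SDMA load: %u%%\n", total ? busy * 100 / total : 0);
	return 0;
}
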
 src/gallium/drivers/radeon/r600_gpu_load.c    | 30 +++++++++++------------
 src/gallium/drivers/radeon/r600_pipe_common.h | 35 ++++++++++++++-------------
 2 files changed, 33 insertions(+), 32 deletions(-)

diff --git a/src/gallium/drivers/radeon/r600_gpu_load.c b/src/gallium/drivers/radeon/r600_gpu_load.c
index 83b7bd7210..c84b86d76c 100644
--- a/src/gallium/drivers/radeon/r600_gpu_load.c
+++ b/src/gallium/drivers/radeon/r600_gpu_load.c
@@ -66,8 +66,8 @@
 			p_atomic_inc(&counters->named.field.idle);	\
 	} while (0)
 
-static void r600_update_grbm_counters(struct r600_common_screen *rscreen,
-				      union r600_grbm_counters *counters)
+static void r600_update_mmio_counters(struct r600_common_screen *rscreen,
+				      union r600_mmio_counters *counters)
 {
 	uint32_t value = 0;
 
@@ -116,7 +116,7 @@ static PIPE_THREAD_ROUTINE(r600_gpu_load_thread, param)
 		last_time = cur_time;
 
 		/* Update the counters. */
-		r600_update_grbm_counters(rscreen, &rscreen->grbm_counters);
+		r600_update_mmio_counters(rscreen, &rscreen->mmio_counters);
 	}
 	p_atomic_dec(&rscreen->gpu_load_stop_thread);
 	return 0;
@@ -132,7 +132,7 @@ void r600_gpu_load_kill_thread(struct r600_common_screen *rscreen)
 	rscreen->gpu_load_thread = 0;
 }
 
-static uint64_t r600_read_grbm_counter(struct r600_common_screen *rscreen,
+static uint64_t r600_read_mmio_counter(struct r600_common_screen *rscreen,
 				       unsigned busy_index)
 {
 	/* Start the thread if needed. */
@@ -145,16 +145,16 @@ static uint64_t r600_read_grbm_counter(struct r600_common_screen *rscreen,
 		pipe_mutex_unlock(rscreen->gpu_load_mutex);
 	}
 
-	unsigned busy = p_atomic_read(&rscreen->grbm_counters.array[busy_index]);
-	unsigned idle = p_atomic_read(&rscreen->grbm_counters.array[busy_index + 1]);
+	unsigned busy = p_atomic_read(&rscreen->mmio_counters.array[busy_index]);
+	unsigned idle = p_atomic_read(&rscreen->mmio_counters.array[busy_index + 1]);
 
 	return busy | ((uint64_t)idle << 32);
 }
 
-static unsigned r600_end_grbm_counter(struct r600_common_screen *rscreen,
+static unsigned r600_end_mmio_counter(struct r600_common_screen *rscreen,
 				      uint64_t begin, unsigned busy_index)
 {
-	uint64_t end = r600_read_grbm_counter(rscreen, busy_index);
+	uint64_t end = r600_read_mmio_counter(rscreen, busy_index);
 	unsigned busy = (end & 0xffffffff) - (begin & 0xffffffff);
 	unsigned idle = (end >> 32) - (begin >> 32);
 
@@ -167,16 +167,16 @@ static unsigned r600_end_grbm_counter(struct r600_common_screen *rscreen,
 	if (idle || busy) {
 		return busy*100 / (busy + idle);
 	} else {
-		union r600_grbm_counters counters;
+		union r600_mmio_counters counters;
 
 		memset(&counters, 0, sizeof(counters));
-		r600_update_grbm_counters(rscreen, &counters);
+		r600_update_mmio_counters(rscreen, &counters);
 		return counters.array[busy_index] ? 100 : 0;
 	}
 }
 
-#define BUSY_INDEX(rscreen, field) (&rscreen->grbm_counters.named.field.busy - \
-				    rscreen->grbm_counters.array)
+#define BUSY_INDEX(rscreen, field) (&rscreen->mmio_counters.named.field.busy - \
+				    rscreen->mmio_counters.array)
 
 static unsigned busy_index_from_type(struct r600_common_screen *rscreen,
 				     unsigned type)
@@ -211,19 +211,19 @@ static unsigned busy_index_from_type(struct r600_common_screen *rscreen,
 	case R600_QUERY_GPU_CB_BUSY:
 		return BUSY_INDEX(rscreen, cb);
 	default:
-		unreachable("query type does not correspond to grbm id");
+		unreachable("invalid query type");
 	}
 }
 
 uint64_t r600_begin_counter(struct r600_common_screen *rscreen, unsigned type)
 {
 	unsigned busy_index = busy_index_from_type(rscreen, type);
-	return r600_read_grbm_counter(rscreen, busy_index);
+	return r600_read_mmio_counter(rscreen, busy_index);
 }
 
 unsigned r600_end_counter(struct r600_common_screen *rscreen, unsigned type,
 			  uint64_t begin)
 {
 	unsigned busy_index = busy_index_from_type(rscreen, type);
-	return r600_end_grbm_counter(rscreen, begin, busy_index);
+	return r600_end_mmio_counter(rscreen, begin, busy_index);
 }
diff --git a/src/gallium/drivers/radeon/r600_pipe_common.h b/src/gallium/drivers/radeon/r600_pipe_common.h
index afb1385f97..76fbf2af98 100644
--- a/src/gallium/drivers/radeon/r600_pipe_common.h
+++ b/src/gallium/drivers/radeon/r600_pipe_common.h
@@ -352,27 +352,28 @@ struct r600_surface {
 	unsigned db_preload_control;	/* EG and later */
 };
 
-struct r600_grbm_counter {
+struct r600_mmio_counter {
 	unsigned busy;
 	unsigned idle;
 };
 
-union r600_grbm_counters {
+union r600_mmio_counters {
 	struct {
-		struct r600_grbm_counter spi;
-		struct r600_grbm_counter gui;
-		struct r600_grbm_counter ta;
-		struct r600_grbm_counter gds;
-		struct r600_grbm_counter vgt;
-		struct r600_grbm_counter ia;
-		struct r600_grbm_counter sx;
-		struct r600_grbm_counter wd;
-		struct r600_grbm_counter bci;
-		struct r600_grbm_counter sc;
-		struct r600_grbm_counter pa;
-		struct r600_grbm_counter db;
-		struct r600_grbm_counter cp;
-		struct r600_grbm_counter cb;
+		/* GRBM_STATUS */
+		struct r600_mmio_counter spi;
+		struct r600_mmio_counter gui;
+		struct r600_mmio_counter ta;
+		struct r600_mmio_counter gds;
+		struct r600_mmio_counter vgt;
+		struct r600_mmio_counter ia;
+		struct r600_mmio_counter sx;
+		struct r600_mmio_counter wd;
+		struct r600_mmio_counter bci;
+		struct r600_mmio_counter sc;
+		struct r600_mmio_counter pa;
+		struct r600_mmio_counter db;
+		struct r600_mmio_counter cp;
+		struct r600_mmio_counter cb;
 	} named;
 	unsigned array[0];
 };
@@ -410,7 +411,7 @@ struct r600_common_screen {
 	/* GPU load thread. */
 	pipe_mutex			gpu_load_mutex;
 	pipe_thread			gpu_load_thread;
-	union r600_grbm_counters	grbm_counters;
+	union r600_mmio_counters	mmio_counters;
 	volatile unsigned		gpu_load_stop_thread; /* bool */
 
 	char				renderer_string[100];
-- 
2.11.0


