[Intel-gfx] [RFC 12/17] drm/i915: Track per-context engine busyness

Tvrtko Ursulin tursulin at ursulin.net
Wed Oct 25 15:36:27 UTC 2017


From: Tvrtko Ursulin <tvrtko.ursulin at intel.com>

Some customers want to know how much GPU time their clients are
using in order to make dynamic load balancing decisions.

With the hooks already in place to track overall engine busyness,
we can extend them slightly to split that time between contexts.
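
As an illustration of the intended accounting, here is a minimal
userspace sketch of the scheme, using CLOCK_MONOTONIC in place of
ktime_get(); all names below are illustrative and not the kernel API:

	#define _POSIX_C_SOURCE 199309L
	#include <stdint.h>
	#include <stdio.h>
	#include <time.h>

	struct ctx_stats {
		uint64_t start;	/* ns timestamp of last schedule-in */
		uint64_t total;	/* ns of accumulated busyness */
	};

	static uint64_t now_ns(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
	}

	/* Context begins executing on the engine. */
	static void ctx_in(struct ctx_stats *ce)
	{
		ce->start = now_ns();
	}

	/*
	 * Context stops executing: charge the elapsed interval to it
	 * and, if another context takes over immediately, start its
	 * clock from the same timestamp so no time is lost or
	 * double-counted across the switch.
	 */
	static void ctx_out(struct ctx_stats *ce, struct ctx_stats *next)
	{
		uint64_t now = now_ns();

		ce->total += now - ce->start;
		ce->start = 0;
		if (next)
			next->start = now;
	}

	int main(void)
	{
		struct ctx_stats a = { 0 }, b = { 0 };

		ctx_in(&a);
		/* ... context a runs ... */
		ctx_out(&a, &b);	/* b takes over at the same instant */
		/* ... context b runs ... */
		ctx_out(&b, NULL);

		printf("a busy %llu ns, b busy %llu ns\n",
		       (unsigned long long)a.total,
		       (unsigned long long)b.total);
		return 0;
	}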

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
Cc: gordon.kelly at intel.com
---
 drivers/gpu/drm/i915/i915_gem_context.h |  4 ++++
 drivers/gpu/drm/i915/intel_lrc.c        | 14 +++++++-----
 drivers/gpu/drm/i915/intel_ringbuffer.h | 39 ++++++++++++++++++++++++++++-----
 3 files changed, 46 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 44688e22a5c2..15134329dbf0 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -158,6 +158,10 @@ struct i915_gem_context {
 		u64 lrc_desc;
 		int pin_count;
 		bool initialised;
+		struct {
+			ktime_t start;
+			ktime_t total;
+		} stats;
 	} engine[I915_NUM_ENGINES];
 
 	/** ring_size: size for allocating the per-engine ring buffer */
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index fcdd1c52f5ae..2f496894aa40 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -401,16 +401,19 @@ execlists_context_status_change(struct drm_i915_gem_request *rq,
 }
 
 static inline void
-execlists_context_schedule_in(struct drm_i915_gem_request *rq)
+execlists_context_schedule_in(struct drm_i915_gem_request *rq,
+			      unsigned int port)
 {
 	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
-	intel_engine_context_in(rq->engine);
+	intel_engine_context_in(rq->engine,
+				&rq->ctx->engine[rq->engine->id],
+				port == 0);
 }
 
 static inline void
 execlists_context_schedule_out(struct drm_i915_gem_request *rq)
 {
-	intel_engine_context_out(rq->engine);
+	intel_engine_context_out(rq->engine, &rq->ctx->engine[rq->engine->id]);
 	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
 }
 
@@ -465,7 +468,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
 		if (rq) {
 			GEM_BUG_ON(count > !n);
 			if (!count++)
-				execlists_context_schedule_in(rq);
+				execlists_context_schedule_in(rq, n);
 			port_set(&port[n], port_pack(rq, count));
 			desc = execlists_update_context(rq);
 			GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
@@ -714,7 +717,8 @@ execlist_cancel_port_requests(struct intel_engine_execlists *execlists)
 		struct drm_i915_gem_request *rq = port_request(port);
 
 		GEM_BUG_ON(!execlists->active);
-		intel_engine_context_out(rq->engine);
+		intel_engine_context_out(rq->engine,
+					 &rq->ctx->engine[rq->engine->id]);
 		execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_PREEMPTED);
 		i915_gem_request_put(rq);
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 3b1dbcbb9d1f..25dd238d5d00 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -3,6 +3,7 @@
 
 #include <linux/hashtable.h>
 #include "i915_gem_batch_pool.h"
+#include "i915_gem_context.h"
 #include "i915_gem_request.h"
 #include "i915_gem_timeline.h"
 #include "i915_pmu.h"
@@ -953,25 +954,37 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *p);
 struct intel_engine_cs *
 intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance);
 
-static inline void intel_engine_context_in(struct intel_engine_cs *engine)
+static inline void
+intel_engine_context_in(struct intel_engine_cs *engine,
+			struct intel_context *ce,
+			bool submit)
 {
 	unsigned long flags;
+	ktime_t now;
 
 	if (READ_ONCE(engine->stats.enabled) == 0)
 		return;
 
 	spin_lock_irqsave(&engine->stats.lock, flags);
 
+	now = submit ? ktime_get() : 0;
+	ce->stats.start = now;
+
 	if (engine->stats.enabled > 0) {
-		if (engine->stats.active++ == 0)
-			engine->stats.start = ktime_get();
+		if (engine->stats.active++ == 0) {
+			if (!now)
+				now = ktime_get();
+			engine->stats.start = now;
+		}
 		GEM_BUG_ON(engine->stats.active == 0);
 	}
 
 	spin_unlock_irqrestore(&engine->stats.lock, flags);
 }
 
-static inline void intel_engine_context_out(struct intel_engine_cs *engine)
+static inline void
+intel_engine_context_out(struct intel_engine_cs *engine,
+			 struct intel_context *ce)
 {
 	unsigned long flags;
 
@@ -981,14 +994,28 @@ static inline void intel_engine_context_out(struct intel_engine_cs *engine)
 	spin_lock_irqsave(&engine->stats.lock, flags);
 
 	if (engine->stats.enabled > 0) {
+		struct execlist_port *next_port = &engine->execlists.port[1];
+		ktime_t now = ktime_get();
 		ktime_t last;
 
+		GEM_BUG_ON(!ce->stats.start);
+		ce->stats.total = ktime_add(ce->stats.total,
+					    ktime_sub(now, ce->stats.start));
+		if (port_isset(next_port)) {
+			struct drm_i915_gem_request *next_req =
+						port_request(next_port);
+			struct intel_context *next_ce =
+					&next_req->ctx->engine[engine->id];
+
+			next_ce->stats.start = now;
+		}
+
 		if (engine->stats.active && --engine->stats.active == 0) {
 			/*
 			 * Decrement the active context count and in case GPU
 			 * is now idle add up to the running total.
 			 */
-			last = ktime_sub(ktime_get(), engine->stats.start);
+			last = ktime_sub(now, engine->stats.start);
 
 			engine->stats.total = ktime_add(engine->stats.total,
 							last);
@@ -998,7 +1025,7 @@ static inline void intel_engine_context_out(struct intel_engine_cs *engine)
 			 * the first event in which case we account from the
 			 * time stats gathering was turned on.
 			 */
-			last = ktime_sub(ktime_get(), engine->stats.enabled_at);
+			last = ktime_sub(now, engine->stats.enabled_at);
 
 			engine->stats.total = ktime_add(engine->stats.total,
 							last);
-- 
2.9.5


