[Intel-gfx] [PATCH 2/2] drm/i915: Wrap execlist port completion into a function

Mika Kuoppala mika.kuoppala at linux.intel.com
Wed Aug 30 11:32:11 UTC 2017


When an execlist entry is processed, we nullify the corresponding port.
When we reset the GPU we also need to nullify the ports. Introduce a
function for this common operation, and use it to cancel the requests on
all ports in the error handling and recovery paths.

Cc: Chris Wilson <chris at chris-wilson.co.uk>
Signed-off-by: Mika Kuoppala <mika.kuoppala at intel.com>
---
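As a quick aside for review, below is a minimal, self-contained user-space
sketch of the two operations this patch factors out. The fake_* names and
simplified types are illustrative stand-ins only, not the real i915
structures; the intent is just to show port[1] being promoted into port[0]
on completion, and every port being drained on cancellation.

/*
 * Illustrative model only: fake_request/fake_port/fake_execlist are
 * simplified stand-ins for the real i915 types.
 */
#include <stdio.h>
#include <string.h>

struct fake_request { int seqno; };
struct fake_port { struct fake_request *request; };

struct fake_execlist {
	struct fake_port port[2];	/* two ELSP submission ports */
};

/* Same shape as execlist_port_complete(): drop port[0], promote port[1]. */
static void fake_port_complete(struct fake_execlist *el)
{
	el->port[0] = el->port[1];
	memset(&el->port[1], 0, sizeof(el->port[1]));
}

/* Same shape as execlist_cancel_port_requests(): drain every port in order. */
static void fake_cancel_ports(struct fake_execlist *el)
{
	unsigned int i;

	for (i = 0; i < sizeof(el->port) / sizeof(el->port[0]); i++) {
		if (el->port[0].request)
			printf("dropping reference on request %d\n",
			       el->port[0].request->seqno);
		fake_port_complete(el);
	}
}

int main(void)
{
	struct fake_request a = { .seqno = 1 }, b = { .seqno = 2 };
	struct fake_execlist el = { .port = { { &a }, { &b } } };

	fake_port_complete(&el);	/* port[0] retired: port[1] takes its place */
	printf("head of ports is now request %d\n", el.port[0].request->seqno);

	fake_cancel_ports(&el);		/* reset/wedged path: release what is left */
	return 0;
}

Building the cancel path on top of the completion helper means the loop only
ever has to look at the head port; the helper shifts the next request into
place on every iteration.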
 drivers/gpu/drm/i915/i915_gem.c            | 11 ++---------
 drivers/gpu/drm/i915/i915_guc_submission.c | 10 +++++-----
 drivers/gpu/drm/i915/intel_lrc.c           | 18 +++++++-----------
 drivers/gpu/drm/i915/intel_ringbuffer.h    | 23 +++++++++++++++++++++++
 4 files changed, 37 insertions(+), 25 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6dffab14a3e3..34387abda65b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3046,18 +3046,11 @@ static void engine_set_wedged(struct intel_engine_cs *engine)
 	 */
 
 	if (i915.enable_execlists) {
-		struct execlist_port *port = engine->execlist.port;
+		struct intel_engine_execlist * const el = &engine->execlist;
 		unsigned long flags;
-		unsigned int n;
 
 		spin_lock_irqsave(&engine->timeline->lock, flags);
-
-		for (n = 0; n < ARRAY_SIZE(engine->execlist.port); n++)
-			i915_gem_request_put(port_request(&port[n]));
-		memset(engine->execlist.port, 0, sizeof(engine->execlist.port));
-		engine->execlist.queue = RB_ROOT;
-		engine->execlist.first = NULL;
-
+		execlist_cancel_port_requests(el);
 		spin_unlock_irqrestore(&engine->timeline->lock, flags);
 
 		/* The port is checked prior to scheduling a tasklet, but
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index f95defe18885..1056d45d2ac9 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -714,20 +714,20 @@ static bool i915_guc_dequeue(struct intel_engine_cs *engine)
 static void i915_guc_irq_handler(unsigned long data)
 {
 	struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
-	struct execlist_port *port = engine->execlist.port;
+	struct intel_engine_execlist * const el = &engine->execlist;
+	struct execlist_port *port = el->port;
 	struct drm_i915_gem_request *rq;
 	bool submit;
 
 	do {
-		rq = port_request(&port[0]);
+		rq = port_request(port);
 		while (rq && i915_gem_request_completed(rq)) {
 			trace_i915_gem_request_out(rq);
 			i915_gem_request_put(rq);
 
-			port[0] = port[1];
-			memset(&port[1], 0, sizeof(port[1]));
+			execlist_port_complete(el);
 
-			rq = port_request(&port[0]);
+			rq = port_request(port);
 		}
 
 		submit = false;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 2964e7c0a873..7f154601c83d 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -412,8 +412,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		 */
 		last->tail = last->wa_tail;
 
-	GEM_BUG_ON(port_isset(&port[1]));
-
 	/* Hardware submission is through 2 ports. Conceptually each port
 	 * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
 	 * static for a context, and unique to each, so we only execute
@@ -485,6 +483,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				if (submit)
 					port_assign(port, last);
 				port++;
+
+				GEM_BUG_ON(port_isset(port));
 			}
 
 			INIT_LIST_HEAD(&rq->priotree.link);
@@ -609,8 +609,7 @@ static void intel_lrc_irq_handler(unsigned long data)
 				trace_i915_gem_request_out(rq);
 				i915_gem_request_put(rq);
 
-				port[0] = port[1];
-				memset(&port[1], 0, sizeof(port[1]));
+				execlist_port_complete(el);
 			} else {
 				port_set(port, port_pack(rq, count));
 			}
@@ -1329,9 +1328,9 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
 static void reset_common_ring(struct intel_engine_cs *engine,
 			      struct drm_i915_gem_request *request)
 {
-	struct execlist_port *port = engine->execlist.port;
+	struct intel_engine_execlist * const el = &engine->execlist;
+	struct execlist_port *port = el->port;
 	struct intel_context *ce;
-	unsigned int n;
 
 	/*
 	 * Catch up with any missed context-switch interrupts.
@@ -1343,16 +1342,13 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 	 * requests were completed.
 	 */
 	if (!request) {
-		for (n = 0; n < ARRAY_SIZE(engine->execlist.port); n++)
-			i915_gem_request_put(port_request(&port[n]));
-		memset(engine->execlist.port, 0, sizeof(engine->execlist.port));
+		execlist_cancel_port_requests(el);
 		return;
 	}
 
 	if (request->ctx != port_request(port)->ctx) {
 		i915_gem_request_put(port_request(port));
-		port[0] = port[1];
-		memset(&port[1], 0, sizeof(port[1]));
+		execlist_port_complete(el);
 	}
 
 	GEM_BUG_ON(request->ctx != port_request(port)->ctx);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 5788bf713e27..9b4ed9f1f122 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -493,6 +493,29 @@ struct intel_engine_cs {
 	u32 (*get_cmd_length_mask)(u32 cmd_header);
 };
 
+static inline void
+execlist_port_complete(struct intel_engine_execlist * const el)
+{
+	struct execlist_port * const port = el->port;
+
+	port[0] = port[1];
+	memset(&port[1], 0, sizeof(port[1]));
+}
+
+static inline void
+execlist_cancel_port_requests(struct intel_engine_execlist * const el)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(el->port); i++) {
+		i915_gem_request_put(port_request(el->port));
+		execlist_port_complete(el);
+	}
+
+	el->queue = RB_ROOT;
+	el->first = NULL;
+}
+
 static inline unsigned int
 intel_engine_flag(const struct intel_engine_cs *engine)
 {
-- 
2.11.0


