[Intel-gfx] [PATCH v2 6/8] drm/i915/scheduler: Split insert_request

Michał Winiarski michal.winiarski at intel.com
Mon May 22 22:07:53 UTC 2017


We'd like to reuse the priolist lookup in the request resubmission
path, so let's split insert_request to make that happen.

v2: Handle allocation error in lookup rather than in caller (Chris)
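
For illustration only (not part of this patch): with the lookup split
out, a resubmission path could share the rbtree walk along the lines
of the sketch below. The helper name resubmit_requests() and its
'requests' parameter are hypothetical; the real user is presumably
introduced later in the series.

static bool resubmit_requests(struct intel_engine_cs *engine,
			      struct list_head *requests,
			      int prio)
{
	struct i915_priotree *pt, *next;
	struct i915_priolist *p;
	bool first = false;

	/* Single rbtree walk, shared with insert_request() */
	p = priolist_lookup(engine, prio, &first);

	/* Requeue everything at the tail of its priority level,
	 * preserving fifo order within that level.
	 */
	list_for_each_entry_safe(pt, next, requests, link)
		list_move_tail(&pt->link, &p->requests);

	/* As with insert_request(), the caller kicks the submission
	 * tasklet when the queue head has changed.
	 */
	return first;
}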

Cc: Chris Wilson <chris at chris-wilson.co.uk>
Cc: Jeff McGee <jeff.mcgee at intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen at linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
Signed-off-by: Michał Winiarski <michal.winiarski at intel.com>
---
 drivers/gpu/drm/i915/intel_lrc.c | 41 ++++++++++++++++++++++++----------------
 1 file changed, 25 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 1255724..8fc852c 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -627,20 +627,16 @@ static void intel_lrc_irq_handler(unsigned long data)
 	intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
 }
 
-static bool
-insert_request(struct intel_engine_cs *engine,
-	       struct i915_priotree *pt,
-	       int prio)
+static struct i915_priolist *
+priolist_lookup(struct intel_engine_cs *engine, int prio, bool *first)
 {
 	struct i915_priolist *p;
 	struct rb_node **parent, *rb;
-	bool first = true;
 
+find_priolist:
 	if (unlikely(engine->no_priolist))
 		prio = I915_PRIORITY_NORMAL;
-
-find_priolist:
-	/* most positive priority is scheduled first, equal priorities fifo */
+	*first = true;
 	rb = NULL;
 	parent = &engine->execlist_queue.rb_node;
 	while (*parent) {
@@ -650,10 +646,10 @@ insert_request(struct intel_engine_cs *engine,
 			parent = &rb->rb_left;
 		} else if (prio < p->priority) {
 			parent = &rb->rb_right;
-			first = false;
+			*first = false;
 		} else {
-			list_add_tail(&pt->link, &p->requests);
-			return false;
+			*first = false;
+			return p;
 		}
 	}
 
@@ -661,10 +657,8 @@ insert_request(struct intel_engine_cs *engine,
 		p = &engine->default_priolist;
 	} else {
 		p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
-		/* Convert an allocation failure to a priority bump */
-		if (unlikely(!p)) {
-			prio = I915_PRIORITY_NORMAL; /* recurses just once */
 
+		if (unlikely(!p)) {
 			/* To maintain ordering with all rendering, after an
 			 * allocation failure we have to disable all scheduling.
 			 * Requests will then be executed in fifo, and schedule
@@ -683,11 +677,26 @@ insert_request(struct intel_engine_cs *engine,
 	rb_insert_color(&p->node, &engine->execlist_queue);
 
 	INIT_LIST_HEAD(&p->requests);
-	list_add_tail(&pt->link, &p->requests);
 
-	if (first)
+	if (*first)
 		engine->execlist_first = &p->node;
 
+	return p;
+}
+
+static bool
+insert_request(struct intel_engine_cs *engine,
+	       struct i915_priotree *pt,
+	       int prio)
+{
+	struct i915_priolist *p;
+	bool first = false;
+
+	p = priolist_lookup(engine, prio, &first);
+
+	/* most positive priority is scheduled first, equal priorities fifo */
+	list_add_tail(&pt->link, &p->requests);
+
 	return first;
 }
 
-- 
2.9.4


