[Intel-gfx] [PATCH] drm/i915: Serialise read/write of the barrier's engine

Chris Wilson <chris@chris-wilson.co.uk>
Tue Aug 13 20:09:05 UTC 2019


We use the request pointer inside the active_node as the indicator of
the barrier's status: we mark it as consumed during
i915_request_add_active_barriers(), and we search for a spare barrier
in reuse_idle_barrier(). The engine itself is stashed in the node's
list pointer, which list_add_tail() overwrites as soon as the barrier
is claimed, so the check must be carefully serialised to ensure that
what we read back really is the engine for the barrier and not just a
random list pointer. (Along the other reuse path, we are fully
serialised by the timeline->mutex.) The acquisition of the barrier
itself is ordered through the strong memory barrier in
llist_del_all().

Fixes: d8af05ff38ae ("drm/i915: Allow sharing the idle-barrier from other kernel requests")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_active.c | 28 +++++++++++++++++++++++-----
 1 file changed, 23 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 7698fcaa648a..2439c4f62ad8 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -52,11 +52,17 @@ static inline struct llist_node *barrier_to_ll(struct active_node *node)
 	return (struct llist_node *)&node->base.link;
 }
 
+static inline struct intel_engine_cs *
+__barrier_to_engine(struct active_node *node)
+{
+	return (struct intel_engine_cs *)READ_ONCE(node->base.link.prev);
+}
+
 static inline struct intel_engine_cs *
 barrier_to_engine(struct active_node *node)
 {
 	GEM_BUG_ON(!is_barrier(&node->base));
-	return (struct intel_engine_cs *)node->base.link.prev;
+	return __barrier_to_engine(node);
 }
 
 static inline struct active_node *barrier_from_ll(struct llist_node *x)
@@ -239,10 +245,11 @@ void __i915_active_init(struct drm_i915_private *i915,
 	__mutex_init(&ref->mutex, "i915_active", key);
 }
 
-static bool __active_del_barrier(struct i915_active *ref,
-				 struct active_node *node)
+static bool ____active_del_barrier(struct i915_active *ref,
+				   struct active_node *node,
+				   struct intel_engine_cs *engine)
+
 {
-	struct intel_engine_cs *engine = barrier_to_engine(node);
 	struct llist_node *head = NULL, *tail = NULL;
 	struct llist_node *pos, *next;
 
@@ -280,6 +287,12 @@ static bool __active_del_barrier(struct i915_active *ref,
 	return !node;
 }
 
+static bool
+__active_del_barrier(struct i915_active *ref, struct active_node *node)
+{
+	return ____active_del_barrier(ref, node, barrier_to_engine(node));
+}
+
 int i915_active_ref(struct i915_active *ref,
 		    u64 timeline,
 		    struct i915_request *rq)
@@ -517,6 +530,7 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
 	for (p = prev; p; p = rb_next(p)) {
 		struct active_node *node =
 			rb_entry(p, struct active_node, node);
+		struct intel_engine_cs *engine;
 
 		if (node->timeline > idx)
 			break;
@@ -534,7 +548,10 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
 		 * the barrier before we claim it, so we have to check
 		 * for success.
 		 */
-		if (is_barrier(&node->base) && __active_del_barrier(ref, node))
+		engine = __barrier_to_engine(node);
+		smp_rmb(); /* serialise with add_active_barriers */
+		if (is_barrier(&node->base) &&
+		    ____active_del_barrier(ref, node, engine))
 			goto match;
 	}
 
@@ -674,6 +691,7 @@ void i915_request_add_active_barriers(struct i915_request *rq)
 	 */
 	llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
 		RCU_INIT_POINTER(barrier_from_ll(node)->base.request, rq);
+		smp_wmb(); /* serialise with reuse_idle_barrier */
 		list_add_tail((struct list_head *)node, &rq->active_list);
 	}
 }
-- 
2.23.0.rc1


