[PATCH 17/27] drm/i915: Push the i915_active.retire into a worker

Chris Wilson <chris@chris-wilson.co.uk>
Tue Jun 18 20:14:23 UTC 2019


As we need to use a mutex to serialise i915_active activation
(because we want to allow the callback to sleep), we need to push the
i915_active.retire into a worker callback in case we need to retire
from an atomic context.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_context.c |  1 +
 drivers/gpu/drm/i915/gt/intel_context.c     |  1 +
 drivers/gpu/drm/i915/i915_active.c          | 72 ++++++++++++++++-----
 drivers/gpu/drm/i915/i915_active_types.h    | 12 ++++
 drivers/gpu/drm/i915/i915_timeline.c        |  1 +
 drivers/gpu/drm/i915/i915_vma.c             |  3 +-
 6 files changed, 74 insertions(+), 16 deletions(-)
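
Reviewer note, not part of the commit message: for anyone unfamiliar
with the idiom, here is a minimal, self-contained sketch of the
defer-to-worker pattern the patch adopts. Every name in it (struct foo,
foo_put(), FOO_RETIRE_SLEEPS, ...) is hypothetical; only the kernel
primitives (atomic_add_unless(), mutex_trylock(), queue_work(),
system_unbound_wq) are the ones the patch itself uses.

#include <linux/atomic.h>
#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct foo {
	atomic_t count;
	struct mutex mutex;
	struct work_struct work;	/* INIT_WORK(&f->work, foo_work) */
	unsigned long flags;
#define FOO_RETIRE_SLEEPS BIT(0)
};

/* Drop the final reference; always entered with the mutex held. */
static void __foo_retire(struct foo *f)
{
	lockdep_assert_held(&f->mutex);

	if (atomic_dec_and_test(&f->count)) {
		/* ... release resources ... */
	}

	mutex_unlock(&f->mutex);
}

static void foo_work(struct work_struct *wrk)
{
	struct foo *f = container_of(wrk, typeof(*f), work);

	/* A new reference may have been taken since we were queued. */
	if (atomic_add_unless(&f->count, -1, 1))
		return;

	/* The worker runs in process context, so blocking is allowed. */
	mutex_lock(&f->mutex);
	__foo_retire(f);
}

static void foo_put(struct foo *f)
{
	/* Fast path: not the last reference, just decrement. */
	if (atomic_add_unless(&f->count, -1, 1))
		return;

	/*
	 * Last reference, but we may be inside an atomic section (e.g.
	 * underneath the fence signaling irq). If retiring needs to
	 * sleep, or the mutex is contended, bounce the release to a
	 * worker instead of blocking here.
	 */
	if (f->flags & FOO_RETIRE_SLEEPS || !mutex_trylock(&f->mutex)) {
		queue_work(system_unbound_wq, &f->work);
		return;
	}

	__foo_retire(f);
}

The split buys us that queue_work() is callable from any context,
while the mutex (and with it a sleeping retire callback) is only ever
taken from process context.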

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 911f3564972d..55b8f0e04846 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -891,6 +891,7 @@ struct context_barrier_task {
 	void *data;
 };
 
+__i915_active_call
 static void cb_retire(struct i915_active *base)
 {
 	struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 20c708ae6dc0..70d3b2568227 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -123,6 +123,7 @@ static void __context_unpin_state(struct i915_vma *vma)
 	__i915_vma_unpin(vma);
 }
 
+__i915_active_call
 static void __intel_context_retire(struct i915_active *active)
 {
 	struct intel_context *ce = container_of(active, typeof(*ce), active);
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 6eec9110d749..b48ac43fc857 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -97,18 +97,14 @@ static void debug_active_assert(struct i915_active *ref)
 #endif
 
 static void
-active_retire(struct i915_active *ref)
+__active_retire(struct i915_active *ref)
 {
 	struct active_node *it, *n;
 	struct rb_root root;
 	bool retire = false;
 
-	GEM_BUG_ON(!atomic_read(&ref->count));
-	if (atomic_add_unless(&ref->count, -1, 1))
-		return;
-
-	/* One active may be flushed from inside the acquire of another */
-	mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
+	lockdep_assert_held(&ref->mutex);
+	GEM_BUG_ON(i915_active_is_idle(ref));
 
 	/* return the unused nodes to our slabcache -- flushing the allocator */
 	if (atomic_dec_and_test(&ref->count)) {
@@ -131,6 +127,36 @@ active_retire(struct i915_active *ref)
 	}
 }
 
+static void
+active_work(struct work_struct *wrk)
+{
+	struct i915_active *ref = container_of(wrk, typeof(*ref), work);
+
+	GEM_BUG_ON(!atomic_read(&ref->count));
+	if (atomic_add_unless(&ref->count, -1, 1))
+		return;
+
+	mutex_lock(&ref->mutex);
+	__active_retire(ref);
+}
+
+static void
+active_retire(struct i915_active *ref)
+{
+	GEM_BUG_ON(!atomic_read(&ref->count));
+	if (atomic_add_unless(&ref->count, -1, 1))
+		return;
+
+	/* If we are inside interrupt context (fence signaling), defer */
+	if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS ||
+	    !mutex_trylock(&ref->mutex)) {
+		queue_work(system_unbound_wq, &ref->work);
+		return;
+	}
+
+	__active_retire(ref);
+}
+
 static void
 node_retire(struct i915_active_request *base, struct i915_request *rq)
 {
@@ -200,16 +226,24 @@ void __i915_active_init(struct drm_i915_private *i915,
 			void (*retire)(struct i915_active *ref),
 			struct lock_class_key *key)
 {
+	unsigned long bits;
+
 	debug_active_init(ref);
 
 	ref->i915 = i915;
+
+	ref->flags = 0;
 	ref->active = active;
-	ref->retire = retire;
+	ref->retire = ptr_unpack_bits(retire, &bits, 2);
+	if (bits & I915_ACTIVE_MAY_SLEEP)
+		ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;
+
 	ref->tree = RB_ROOT;
 	ref->cache = NULL;
 	init_llist_head(&ref->barriers);
 	atomic_set(&ref->count, 0);
 	__mutex_init(&ref->mutex, "i915_active", key);
+	INIT_WORK(&ref->work, active_work);
 }
 
 int i915_active_ref(struct i915_active *ref,
@@ -282,8 +316,10 @@ int i915_active_wait(struct i915_active *ref)
 	if (err)
 		return err;
 
-	if (!atomic_add_unless(&ref->count, 1, 0))
-		goto unlock;
+	if (!atomic_add_unless(&ref->count, 1, 0)) {
+		mutex_unlock(&ref->mutex);
+		return 0;
+	}
 
 	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
 		err = i915_active_request_retire(&it->base, BKL(ref));
@@ -291,10 +327,15 @@ int i915_active_wait(struct i915_active *ref)
 			break;
 	}
 
-	active_retire(ref);
-unlock:
-	mutex_unlock(&ref->mutex);
-	return err;
+	__active_retire(ref);
+	if (err)
+		return err;
+
+	flush_work(&ref->work);
+	if (!i915_active_is_idle(ref))
+		return -EBUSY;
+
+	return 0;
 }
 
 int i915_request_await_active_request(struct i915_request *rq,
@@ -335,8 +376,9 @@ int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
 void i915_active_fini(struct i915_active *ref)
 {
 	debug_active_fini(ref);
-	GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
 	GEM_BUG_ON(atomic_read(&ref->count));
+	GEM_BUG_ON(work_pending(&ref->work));
+	GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
 	mutex_destroy(&ref->mutex);
 }
 #endif
diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h
index 5b0a3024ce24..a3a5ec3e4163 100644
--- a/drivers/gpu/drm/i915/i915_active_types.h
+++ b/drivers/gpu/drm/i915/i915_active_types.h
@@ -12,6 +12,9 @@
 #include <linux/mutex.h>
 #include <linux/rbtree.h>
 #include <linux/rcupdate.h>
+#include <linux/workqueue.h>
+
+#include "i915_utils.h"
 
 struct drm_i915_private;
 struct i915_active_request;
@@ -28,6 +31,11 @@ struct i915_active_request {
 
 struct active_node;
 
+#define I915_ACTIVE_MAY_SLEEP BIT(0)
+
+#define __i915_active_call __aligned(4)
+#define i915_active_may_sleep(fn) ptr_pack_bits(&(fn), I915_ACTIVE_MAY_SLEEP, 2)
+
 struct i915_active {
 	struct drm_i915_private *i915;
 
@@ -38,6 +46,10 @@ struct i915_active {
 
 	int (*active)(struct i915_active *ref);
 	void (*retire)(struct i915_active *ref);
+	unsigned long flags;
+#define I915_ACTIVE_RETIRE_SLEEPS BIT(0)
+
+	struct work_struct work;
 
 	struct llist_head barriers;
 };
diff --git a/drivers/gpu/drm/i915/i915_timeline.c b/drivers/gpu/drm/i915/i915_timeline.c
index 3ea1b881a4ae..0f4ca9478ae5 100644
--- a/drivers/gpu/drm/i915/i915_timeline.c
+++ b/drivers/gpu/drm/i915/i915_timeline.c
@@ -138,6 +138,7 @@ static void __idle_cacheline_free(struct i915_timeline_cacheline *cl)
 	kfree(cl);
 }
 
+__i915_active_call
 static void __cacheline_retire(struct i915_active *active)
 {
 	struct i915_timeline_cacheline *cl =
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 8863b504d996..aa0df8332e25 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -88,6 +88,7 @@ static int __i915_vma_active(struct i915_active *ref)
 	return 0;
 }
 
+__i915_active_call
 static void __i915_vma_retire(struct i915_active *ref)
 {
 	i915_vma_put(active_to_vma(ref));
@@ -976,13 +977,13 @@ int i915_vma_unbind(struct i915_vma *vma)
 		if (ret)
 			return ret;
 	}
-	GEM_BUG_ON(i915_vma_is_active(vma));
 
 	if (i915_vma_is_pinned(vma)) {
 		vma_print_allocator(vma, "is pinned");
 		return -EBUSY;
 	}
 
+	GEM_BUG_ON(i915_vma_is_active(vma));
 	if (!drm_mm_node_allocated(&vma->node))
 		return 0;
 
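A second reviewer note, again not part of the commit message: the
__i915_active_call annotation used throughout expands to __aligned(4),
which guarantees the callback's address has its two low bits clear.
i915_active_may_sleep() can therefore stash I915_ACTIVE_MAY_SLEEP in
bit 0 of the function pointer via ptr_pack_bits() (i915_utils.h), and
__i915_active_init() recovers both halves with ptr_unpack_bits(). A
rough stand-alone illustration of the trick, with hypothetical helper
names (pack_ptr_bits()/unpack_ptr_bits()):

#include <linux/bits.h>

#define MAY_SLEEP BIT(0)
#define LOW_BITS  2	/* __aligned(4) leaves bits 0-1 clear */

/* Hypothetical stand-ins for ptr_pack_bits()/ptr_unpack_bits(). */
static inline void *pack_ptr_bits(void *ptr, unsigned long bits)
{
	return (void *)((unsigned long)ptr | bits);
}

static inline void *unpack_ptr_bits(void *packed, unsigned long *bits)
{
	unsigned long mask = BIT(LOW_BITS) - 1;

	*bits = (unsigned long)packed & mask;
	return (void *)((unsigned long)packed & ~mask);
}

__aligned(4)
static void my_retire(void *data)
{
	/* ... teardown that may sleep ... */
}

/*
 * Registration packs the flag in, e.g.
 *	void *fn = pack_ptr_bits((void *)my_retire, MAY_SLEEP);
 * and the consumer later splits it apart again:
 *	unsigned long flags;
 *	void (*retire)(void *) = unpack_ptr_bits(fn, &flags);
 */

Packing the flag into the callback pointer keeps the i915_active_init()
call signature unchanged for the callers whose retire callbacks never
sleep.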
-- 
2.20.1


