[PATCH 49/49] drm/i915: Keep contexts pinned until after the next kernel context switch

Chris Wilson chris at chris-wilson.co.uk
Tue Feb 5 19:00:59 UTC 2019


We need to keep the context image pinned in memory until after the GPU
has finished writing into it. Since it continues to write as we signal
the final breadcrumb, we need to keep it pinned until the request after
it is complete. Currently we presume the order in which requests
execute on each engine; to remove that presumption we need to identify
a request/context-switch that we know must occur after our completion.
Any request queued after the signal must imply a context switch; for
simplicity we use a fresh request from the kernel context.

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_active.c            | 70 ++++++++++++++++++
 drivers/gpu/drm/i915/i915_active.h            |  4 ++
 drivers/gpu/drm/i915/i915_drv.h               |  1 +
 drivers/gpu/drm/i915/i915_gem.c               | 45 +++++++-----
 drivers/gpu/drm/i915/i915_gem_context.c       | 17 +----
 drivers/gpu/drm/i915/i915_gem_context.h       |  1 -
 drivers/gpu/drm/i915/i915_gem_evict.c         | 25 +++----
 drivers/gpu/drm/i915/i915_request.c           | 15 ----
 drivers/gpu/drm/i915/i915_reset.c             |  5 +-
 drivers/gpu/drm/i915/intel_context.c          | 14 ++--
 drivers/gpu/drm/i915/intel_context.h          |  2 +
 drivers/gpu/drm/i915/intel_context_types.h    |  2 +-
 drivers/gpu/drm/i915/intel_engine_cs.c        | 39 +++++-----
 drivers/gpu/drm/i915/intel_engine_types.h     | 12 +---
 drivers/gpu/drm/i915/intel_lrc.c              | 71 +++++++++----------
 drivers/gpu/drm/i915/intel_ringbuffer.c       | 39 +++++-----
 drivers/gpu/drm/i915/intel_ringbuffer.h       |  2 -
 drivers/gpu/drm/i915/selftests/mock_engine.c  |  5 --
 .../gpu/drm/i915/selftests/mock_gem_device.c  |  1 -
 19 files changed, 204 insertions(+), 166 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 846900535d10..04ff7d51b8a7 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -247,6 +247,76 @@ void i915_active_fini(struct i915_active *ref)
 }
 #endif
 
+int i915_active_acquire_barrier(struct i915_active *ref,
+				struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *i915 = engine->i915;
+	unsigned long tmp;
+	int err = 0;
+
+	lockdep_assert_held(&i915->drm.struct_mutex);
+	i915->gt.idle_barriers |= engine->mask;
+
+	i915_active_acquire(ref);
+	for_each_engine_masked(engine, i915, engine->mask, tmp) {
+		struct intel_context *kctx =
+			intel_context_lookup(i915->kernel_context, engine);
+		u64 idx = kctx->ring->timeline->fence_context;
+		struct rb_node **p, *parent;
+		struct active_node *node;
+
+		parent = NULL;
+		p = &ref->tree.rb_node;
+		while (*p) {
+			parent = *p;
+
+			node = rb_entry(parent, struct active_node, node);
+			if (node->timeline == idx)
+				goto replace;
+
+			if (node->timeline < idx)
+				p = &parent->rb_right;
+			else
+				p = &parent->rb_left;
+		}
+
+		/* Think before you shrink! */
+		node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
+		if (unlikely(!node)) {
+			err = -ENOMEM;
+			break;
+		}
+
+		i915_active_request_init(&node->base, NULL, node_retire);
+		node->ref = ref;
+		node->timeline = idx;
+
+		rb_link_node(&node->node, parent, p);
+		rb_insert_color(&node->node, &ref->tree);
+
+replace:
+		ref->count += !node->base.request;
+		RCU_INIT_POINTER(node->base.request, ERR_PTR(-EAGAIN));
+		list_move_tail(&node->base.link, &engine->barrier_tasks);
+	}
+	i915_active_release(ref);
+
+	return err;
+}
+
+void i915_request_add_barriers(struct i915_request *rq)
+{
+	struct intel_engine_cs *engine = rq->engine;
+
+	lockdep_assert_held(&rq->i915->drm.struct_mutex);
+
+	GEM_BUG_ON(!is_power_of_2(engine->mask));
+	if (!__test_and_clear_bit(engine->id, &engine->i915->gt.idle_barriers))
+		return;
+
+	list_splice_tail_init(&engine->barrier_tasks, &rq->active_list);
+}
+
 int i915_active_request_set(struct i915_active_request *active,
 			    struct i915_request *rq)
 {
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index a049ccd478c6..431af010ec96 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -406,6 +406,10 @@ void i915_active_fini(struct i915_active *ref);
 static inline void i915_active_fini(struct i915_active *ref) { }
 #endif
 
+int i915_active_acquire_barrier(struct i915_active *ref,
+				struct intel_engine_cs *engine);
+void i915_request_add_barriers(struct i915_request *rq);
+
 int i915_global_active_init(void);
 void i915_global_active_shrink(void);
 void i915_global_active_exit(void);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6be5dba889b5..93aea23a8545 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1984,6 +1984,7 @@ struct drm_i915_private {
 		struct list_head active_rings;
 		struct list_head closed_vma;
 		unsigned long active_engines;
+		unsigned long idle_barriers;
 		u32 active_requests;
 
 		/**
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ca5e8f87f166..afeee2ca1422 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -109,6 +109,7 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)
 
 	lockdep_assert_held(&i915->drm.struct_mutex);
 	GEM_BUG_ON(i915->gt.active_requests);
+	GEM_BUG_ON(i915->gt.idle_barriers);
 	GEM_BUG_ON(!list_empty(&i915->gt.active_rings));
 
 	if (!i915->gt.awake)
@@ -2847,14 +2848,14 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
 static void
 i915_gem_retire_work_handler(struct work_struct *work)
 {
-	struct drm_i915_private *dev_priv =
-		container_of(work, typeof(*dev_priv), gt.retire_work.work);
-	struct drm_device *dev = &dev_priv->drm;
+	struct drm_i915_private *i915 =
+		container_of(work, typeof(*i915), gt.retire_work.work);
 
 	/* Come back later if the device is busy... */
-	if (mutex_trylock(&dev->struct_mutex)) {
-		i915_retire_requests(dev_priv);
-		mutex_unlock(&dev->struct_mutex);
+	if (mutex_trylock(&i915->drm.struct_mutex)) {
+		i915_gem_switch_to_kernel_context(i915, i915->gt.idle_barriers);
+		i915_retire_requests(i915);
+		mutex_unlock(&i915->drm.struct_mutex);
 	}
 
 	/*
@@ -2862,9 +2863,9 @@ i915_gem_retire_work_handler(struct work_struct *work)
 	 * We do not need to do this test under locking as in the worst-case
 	 * we queue the retire worker once too often.
 	 */
-	if (READ_ONCE(dev_priv->gt.awake))
-		queue_delayed_work(dev_priv->wq,
-				   &dev_priv->gt.retire_work,
+	if (READ_ONCE(i915->gt.awake))
+		queue_delayed_work(i915->wq,
+				   &i915->gt.retire_work,
 				   round_jiffies_up_relative(HZ));
 }
 
@@ -2928,8 +2929,8 @@ static void __sleep_rcu(struct rcu_head *rcu)
 	}
 }
 
-static bool switch_to_kernel_context_sync(struct drm_i915_private *i915,
-					  unsigned long mask)
+static bool __switch_to_kernel_context_sync(struct drm_i915_private *i915,
+					    unsigned long mask)
 {
 	if (i915_gem_switch_to_kernel_context(i915, mask))
 		return false;
@@ -2943,10 +2944,22 @@ static bool switch_to_kernel_context_sync(struct drm_i915_private *i915,
 	return true;
 }
 
+static bool switch_to_kernel_context_idle(struct drm_i915_private *i915,
+					  unsigned long mask)
+{
+	if (!__switch_to_kernel_context_sync(i915, mask))
+		return false;
+
+	if (!__switch_to_kernel_context_sync(i915, i915->gt.idle_barriers))
+		return false;
+
+	return true;
+}
+
 static bool load_power_context(struct drm_i915_private *i915)
 {
 	/* Force loading the kernel context on all engines */
-	if (!switch_to_kernel_context_sync(i915, -1))
+	if (!switch_to_kernel_context_idle(i915, -1))
 		return false;
 
 	/*
@@ -2995,7 +3008,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
 	if (!gt->active_requests && !work_pending(&gt->idle_work.work)) {
 		++gt->active_requests; /* don't requeue idle */
 
-		if (!switch_to_kernel_context_sync(i915,
+		if (!switch_to_kernel_context_idle(i915,
 						   i915->gt.active_engines)) {
 			dev_err(i915->drm.dev,
 				"Failed to idle engines, declaring wedged!\n");
@@ -4444,10 +4457,6 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
 
 	intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
 	intel_runtime_pm_put(i915, wakeref);
-
-	mutex_lock(&i915->drm.struct_mutex);
-	i915_gem_contexts_lost(i915);
-	mutex_unlock(&i915->drm.struct_mutex);
 }
 
 void i915_gem_suspend(struct drm_i915_private *i915)
@@ -4472,7 +4481,7 @@ void i915_gem_suspend(struct drm_i915_private *i915)
 	 * state. Fortunately, the kernel_context is disposable and we do
 	 * not rely on its state.
 	 */
-	if (!switch_to_kernel_context_sync(i915, i915->gt.active_engines)) {
+	if (!switch_to_kernel_context_idle(i915, i915->gt.active_engines)) {
 		/* Forcibly cancel outstanding work and leave the gpu quiet. */
 		i915_gem_set_wedged(i915);
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index bb3c6a7faba9..fd9534abc0f3 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -639,17 +639,6 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
 	return 0;
 }
 
-void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
-{
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-
-	lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
-	for_each_engine(engine, dev_priv, id)
-		intel_engine_lost_context(engine);
-}
-
 void i915_gem_contexts_fini(struct drm_i915_private *i915)
 {
 	lockdep_assert_held(&i915->drm.struct_mutex);
@@ -932,6 +921,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
 							 I915_FENCE_GFP);
 		}
 
+		i915_request_add_barriers(rq);
 		i915_request_add(rq);
 	}
 
@@ -1125,9 +1115,8 @@ gen8_modify_rpcs_gpu(struct intel_context *ce,
 	 * But we only need to take one pin on the account of it. Or in other
 	 * words transfer the pinned ce object to tracked active request.
 	 */
-	if (!i915_active_request_isset(&ce->active_tracker))
-		__intel_context_pin(ce);
-	__i915_active_request_set(&ce->active_tracker, rq);
+	GEM_BUG_ON(i915_active_is_idle(&ce->active_tracker));
+	ret = i915_active_ref(&ce->active_tracker, rq->fence.context, rq);
 
 out_add:
 	i915_request_add(rq);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index b391aa1f55ae..1c7b3a4122b9 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -123,7 +123,6 @@ static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
 
 /* i915_gem_context.c */
 int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
-void i915_gem_contexts_lost(struct drm_i915_private *dev_priv);
 void i915_gem_contexts_fini(struct drm_i915_private *dev_priv);
 
 int i915_gem_context_open(struct drm_i915_private *i915,
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 060f5903544a..43f61d7eb1d0 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -38,7 +38,7 @@ I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
 
 static bool ggtt_is_idle(struct drm_i915_private *i915)
 {
-	return !i915->gt.active_requests;
+	return !i915->gt.active_requests && !i915->gt.idle_barriers;
 }
 
 static int ggtt_flush(struct drm_i915_private *i915)
@@ -52,19 +52,14 @@ static int ggtt_flush(struct drm_i915_private *i915)
 	 * the hopes that we can then remove contexts and the like only
 	 * bound by their active reference.
 	 */
-	err = i915_gem_switch_to_kernel_context(i915, i915->gt.active_engines);
+	err = i915_gem_switch_to_kernel_context(i915, i915->gt.idle_barriers);
 	if (err)
 		return err;
 
-	err = i915_gem_wait_for_idle(i915,
-				     I915_WAIT_INTERRUPTIBLE |
-				     I915_WAIT_LOCKED,
-				     MAX_SCHEDULE_TIMEOUT);
-	if (err)
-		return err;
-
-	GEM_BUG_ON(!ggtt_is_idle(i915));
-	return 0;
+	return i915_gem_wait_for_idle(i915,
+				      I915_WAIT_INTERRUPTIBLE |
+				      I915_WAIT_LOCKED,
+				      MAX_SCHEDULE_TIMEOUT);
 }
 
 static bool
@@ -414,9 +409,11 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
 	 * switch otherwise is ineffective.
 	 */
 	if (i915_is_ggtt(vm)) {
-		ret = ggtt_flush(vm->i915);
-		if (ret)
-			return ret;
+		do {
+			ret = ggtt_flush(vm->i915);
+			if (ret)
+				return ret;
+		} while (!ggtt_is_idle(vm->i915));
 	}
 
 	INIT_LIST_HEAD(&eviction_list);
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 53dbe2964d90..f2a8da045470 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -221,18 +221,6 @@ static void __retire_engine_request(struct intel_engine_cs *engine,
 	spin_unlock(&rq->lock);
 
 	local_irq_enable();
-
-	/*
-	 * The backing object for the context is done after switching to the
-	 * *next* context. Therefore we cannot retire the previous context until
-	 * the next context has already started running. However, since we
-	 * cannot take the required locks at i915_request_submit() we
-	 * defer the unpinning of the active context to now, retirement of
-	 * the subsequent request.
-	 */
-	if (engine->last_retired_context)
-		intel_context_unpin(engine->last_retired_context);
-	engine->last_retired_context = rq->hw_context;
 }
 
 static void __retire_engine_upto(struct intel_engine_cs *engine,
@@ -760,9 +748,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	if (ret)
 		goto err_unwind;
 
-	/* Keep a second pin for the dual retirement along engine and ring */
-	__intel_context_pin(ce);
-
 	rq->infix = rq->ring->emit; /* end of header; start of user payload */
 
 	/* Check that we didn't interrupt ourselves with a new request */
diff --git a/drivers/gpu/drm/i915/i915_reset.c b/drivers/gpu/drm/i915/i915_reset.c
index 78c9689629a0..51cc730e6ccb 100644
--- a/drivers/gpu/drm/i915/i915_reset.c
+++ b/drivers/gpu/drm/i915/i915_reset.c
@@ -758,9 +758,8 @@ static void reset_restart(struct drm_i915_private *i915)
 
 	/*
 	 * Following the reset, ensure that we always reload context for
-	 * powersaving, and to correct engine->last_retired_context. Since
-	 * this requires us to submit a request, queue a worker to do that
-	 * task for us to evade any locking here.
+	 * powersaving. Since this requires us to submit a request, queue
+	 * a worker to do that task for us to evade any locking here.
 	 */
 	if (READ_ONCE(i915->gpu_error.restart))
 		return;
diff --git a/drivers/gpu/drm/i915/intel_context.c b/drivers/gpu/drm/i915/intel_context.c
index 81cc29a95e07..88a1eddad0a2 100644
--- a/drivers/gpu/drm/i915/intel_context.c
+++ b/drivers/gpu/drm/i915/intel_context.c
@@ -109,13 +109,17 @@ intel_context_instance(struct i915_gem_context *ctx,
 	return pos;
 }
 
-static void intel_context_retire(struct i915_active_request *active,
-				 struct i915_request *rq)
+void intel_context_retire(struct i915_active *active)
 {
 	struct intel_context *ce =
 		container_of(active, typeof(*ce), active_tracker);
 
-	intel_context_unpin(ce);
+	if (ce->state) {
+		ce->state->obj->pin_global--;
+		i915_vma_unpin(ce->state);
+	}
+
+	i915_gem_context_put(ce->gem_context);
 }
 
 void
@@ -132,7 +136,5 @@ intel_context_init(struct intel_context *ce,
 	/* Use the whole device by default */
 	ce->sseu = intel_device_default_sseu(ctx->i915);
 
-	i915_active_request_init(&ce->active_tracker,
-				 NULL, intel_context_retire);
+	i915_active_init(ctx->i915, &ce->active_tracker, intel_context_retire);
 }
-
diff --git a/drivers/gpu/drm/i915/intel_context.h b/drivers/gpu/drm/i915/intel_context.h
index c3fffd9b8ae4..9201fab8e29d 100644
--- a/drivers/gpu/drm/i915/intel_context.h
+++ b/drivers/gpu/drm/i915/intel_context.h
@@ -67,4 +67,6 @@ static inline void intel_context_unpin(struct intel_context *ce)
 	ce->ops->unpin(ce);
 }
 
+void intel_context_retire(struct i915_active *active);
+
 #endif /* __INTEL_CONTEXT_H__ */
diff --git a/drivers/gpu/drm/i915/intel_context_types.h b/drivers/gpu/drm/i915/intel_context_types.h
index 857f5c335324..7cc07d36db3c 100644
--- a/drivers/gpu/drm/i915/intel_context_types.h
+++ b/drivers/gpu/drm/i915/intel_context_types.h
@@ -50,7 +50,7 @@ struct intel_context {
 	 * active_tracker: Active tracker for the external rq activity
 	 * on this intel_context object.
 	 */
-	struct i915_active_request active_tracker;
+	struct i915_active active_tracker;
 
 	const struct intel_context_ops *ops;
 	struct rb_node node;
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index a5b2d50208ef..3b3ccffad648 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -575,6 +575,8 @@ int intel_engine_setup_common(struct intel_engine_cs *engine)
 {
 	int err;
 
+	INIT_LIST_HEAD(&engine->barrier_tasks);
+
 	err = init_status_page(engine);
 	if (err)
 		return err;
@@ -756,6 +758,18 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
 	return ret;
 }
 
+static void call_barrier_tasks(struct intel_engine_cs *engine)
+{
+	struct i915_active_request *task, *next;
+
+	list_for_each_entry_safe(task, next, &engine->barrier_tasks, link) {
+		INIT_LIST_HEAD(&task->link);
+		RCU_INIT_POINTER(task->request, NULL);
+		task->retire(task, NULL);
+	}
+	INIT_LIST_HEAD(&engine->barrier_tasks);
+}
+
 /**
  * intel_engines_cleanup_common - cleans up the engine state created by
  *                                the common initiailizers.
@@ -776,10 +790,15 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 	if (engine->default_state)
 		i915_gem_object_put(engine->default_state);
 
+	GEM_BUG_ON(!list_empty(&engine->barrier_tasks));
+
 	if (i915->preempt_context)
 		__intel_context_unpin(i915->preempt_context, engine);
 	__intel_context_unpin(i915->kernel_context, engine);
 
+	/* The only tasks that remain should be the kernel context unpin */
+	call_barrier_tasks(engine);
+
 	i915_timeline_fini(&engine->timeline);
 
 	intel_wa_list_free(&engine->ctx_wa_list);
@@ -1208,26 +1227,6 @@ void intel_engines_unpark(struct drm_i915_private *i915)
 	}
 }
 
-/**
- * intel_engine_lost_context: called when the GPU is reset into unknown state
- * @engine: the engine
- *
- * We have either reset the GPU or otherwise about to lose state tracking of
- * the current GPU logical state (e.g. suspend). On next use, it is therefore
- * imperative that we make no presumptions about the current state and load
- * from scratch.
- */
-void intel_engine_lost_context(struct intel_engine_cs *engine)
-{
-	struct intel_context *ce;
-
-	lockdep_assert_held(&engine->i915->drm.struct_mutex);
-
-	ce = fetch_and_zero(&engine->last_retired_context);
-	if (ce)
-		intel_context_unpin(ce);
-}
-
 bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
 {
 	switch (INTEL_GEN(engine->i915)) {
diff --git a/drivers/gpu/drm/i915/intel_engine_types.h b/drivers/gpu/drm/i915/intel_engine_types.h
index 86a57c594428..f4edf9ae6b61 100644
--- a/drivers/gpu/drm/i915/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/intel_engine_types.h
@@ -265,6 +265,7 @@ struct intel_engine_cs {
 	struct intel_ring *buffer;
 
 	struct i915_timeline timeline;
+	struct list_head barrier_tasks;
 
 	struct drm_i915_gem_object *default_state;
 	void *pinned_default_state;
@@ -405,17 +406,6 @@ struct intel_engine_cs {
 
 	struct intel_engine_execlists execlists;
 
-	/* Contexts are pinned whilst they are active on the GPU. The last
-	 * context executed remains active whilst the GPU is idle - the
-	 * switch away and write to the context object only occurs on the
-	 * next execution.  Contexts are only unpinned on retirement of the
-	 * following request ensuring that we can always write to the object
-	 * on the context switch even after idling. Across suspend, we switch
-	 * to the kernel context and trash it as the save may not happen
-	 * before the hardware is powered down.
-	 */
-	struct intel_context *last_retired_context;
-
 	/* status_notifier: list of callbacks for context-switch changes */
 	struct atomic_notifier_head context_status_notifier;
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 3ef0cd105c52..813b317b95cb 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1444,38 +1444,15 @@ static void execlists_context_destroy(struct intel_context *ce)
 
 static void execlists_context_unpin(struct intel_context *ce)
 {
-	struct intel_engine_cs *engine;
-
-	/*
-	 * The tasklet may still be using a pointer to our state, via an
-	 * old request. However, since we know we only unpin the context
-	 * on retirement of the following request, we know that the last
-	 * request referencing us will have had a completion CS interrupt.
-	 * If we see that it is still active, it means that the tasklet hasn't
-	 * had the chance to run yet; let it run before we teardown the
-	 * reference it may use.
-	 */
-	engine = READ_ONCE(ce->active);
-	if (unlikely(engine)) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&engine->timeline.lock, flags);
-		process_csb(engine);
-		spin_unlock_irqrestore(&engine->timeline.lock, flags);
-
-		GEM_BUG_ON(READ_ONCE(ce->active));
-	}
-
 	i915_gem_context_unpin_hw_id(ce->gem_context);
 
 	intel_ring_unpin(ce->ring);
 
-	ce->state->obj->pin_global--;
 	i915_gem_object_unpin_map(ce->state->obj);
-	i915_vma_unpin(ce->state);
 
+	i915_active_acquire_barrier(&ce->active_tracker, ce->engine);
+	i915_active_release(&ce->active_tracker);
 	list_del(&ce->active_link);
-	i915_gem_context_put(ce->gem_context);
 }
 
 static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
@@ -1507,6 +1484,9 @@ __execlists_update_reg_state(struct intel_engine_cs *engine,
 	u32 *regs = ce->lrc_reg_state;
 	struct intel_ring *ring = ce->ring;
 
+	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
+	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
+
 	regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(ring->vma);
 	regs[CTX_RING_HEAD + 1] = ring->head;
 	regs[CTX_RING_TAIL + 1] = ring->tail;
@@ -1531,9 +1511,22 @@ __execlists_context_pin(struct intel_engine_cs *engine,
 		goto err;
 	GEM_BUG_ON(!ce->state);
 
-	ret = __context_pin(ctx, ce->state);
-	if (ret)
-		goto err;
+	if (i915_active_acquire(&ce->active_tracker)) {
+		ret = __context_pin(ctx, ce->state);
+		if (ret) {
+			i915_active_cancel(&ce->active_tracker);
+			goto err;
+		}
+
+		ce->state->obj->pin_global++;
+		i915_gem_context_get(ctx);
+
+		/* Preallocate tracking nodes */
+		ret = i915_active_acquire_barrier(&ce->active_tracker,
+						  ce->engine);
+		if (ret)
+			goto unpin_vma;
+	}
 
 	vaddr = i915_gem_object_pin_map(ce->state->obj,
 					i915_coherent_map_type(ctx->i915) |
@@ -1553,14 +1546,10 @@ __execlists_context_pin(struct intel_engine_cs *engine,
 
 	intel_lr_context_descriptor_update(ctx, engine, ce);
 
-	GEM_BUG_ON(!intel_ring_offset_valid(ce->ring, ce->ring->head));
-
 	ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
 
 	__execlists_update_reg_state(engine, ce);
 
-	ce->state->obj->pin_global++;
-	i915_gem_context_get(ctx);
 	list_add(&ce->active_link, &ctx->active_engines);
 	return ce;
 
@@ -1569,7 +1558,7 @@ __execlists_context_pin(struct intel_engine_cs *engine,
 unpin_map:
 	i915_gem_object_unpin_map(ce->state->obj);
 unpin_vma:
-	__i915_vma_unpin(ce->state);
+	i915_active_release(&ce->active_tracker);
 err:
 	ce->pin_count = 0;
 	return ERR_PTR(ret);
@@ -3114,13 +3103,20 @@ static void virtual_engine_free(struct kref *kref)
 	kfree(ve);
 }
 
+static void virtual_context_retire(struct i915_active *ref)
+{
+	struct virtual_engine *ve =
+		container_of(ref, typeof(*ve), context.active_tracker);
+
+	intel_context_retire(ref);
+	kref_put(&ve->kref, virtual_engine_free);
+}
+
 static void virtual_context_unpin(struct intel_context *ce)
 {
 	struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
 
 	execlists_context_unpin(ce);
-
-	kref_put(&ve->kref, virtual_engine_free);
 }
 
 static const struct intel_context_ops virtual_context_ops = {
@@ -3166,7 +3162,9 @@ virtual_context_pin(struct intel_engine_cs *engine,
 		return ce;
 	GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
 
-	kref_get(&ve->kref);
+	if (i915_active_is_idle(&ce->active_tracker))
+		kref_get(&ve->kref);
+
 	ce->ops = &virtual_context_ops;
 
 	/* Note: we must use a real engine class for setting up reg state */
@@ -3329,6 +3327,7 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
 	i915_timeline_set_subclass(&ve->base.timeline, TIMELINE_VIRTUAL);
 
 	intel_context_init(&ve->context, ctx, &ve->base);
+	ve->context.active_tracker.retire = virtual_context_retire;
 	__intel_context_insert(ctx, &ve->base, &ve->context);
 
 	ve->base.context_pin = virtual_context_pin;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 6b2207cd37d9..729d1947164f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1426,25 +1426,14 @@ static int __context_pin(struct intel_context *ce)
 	return 0;
 }
 
-static void __context_unpin(struct intel_context *ce)
-{
-	struct i915_vma *vma;
-
-	vma = ce->state;
-	if (!vma)
-		return;
-
-	vma->obj->pin_global--;
-	i915_vma_unpin(vma);
-}
-
 static void intel_ring_context_unpin(struct intel_context *ce)
 {
 	__context_unpin_ppgtt(ce->gem_context);
-	__context_unpin(ce);
 
+	if (ce->state)
+		i915_active_acquire_barrier(&ce->active_tracker, ce->engine);
+	i915_active_release(&ce->active_tracker);
 	list_del(&ce->active_link);
-	i915_gem_context_put(ce->gem_context);
 }
 
 static struct i915_vma *
@@ -1535,15 +1524,27 @@ __ring_context_pin(struct intel_engine_cs *engine,
 		ce->state = vma;
 	}
 
-	err = __context_pin(ce);
-	if (err)
-		goto err;
+	if (i915_active_acquire(&ce->active_tracker)) {
+		err = __context_pin(ce);
+		if (err) {
+			i915_active_cancel(&ce->active_tracker);
+			goto err;
+		}
+
+		i915_gem_context_get(ctx);
+
+		if (ce->state) { /* Preallocate tracking nodes */
+			err = i915_active_acquire_barrier(&ce->active_tracker,
+							  ce->engine);
+			if (err)
+				goto err_unpin;
+		}
+	}
 
 	err = __context_pin_ppgtt(ce->gem_context);
 	if (err)
 		goto err_unpin;
 
-	i915_gem_context_get(ctx);
 	list_add(&ce->active_link, &ctx->active_engines);
 
 	/* One ringbuffer to rule them all */
@@ -1553,7 +1554,7 @@ __ring_context_pin(struct intel_engine_cs *engine,
 	return ce;
 
 err_unpin:
-	__context_unpin(ce);
+	i915_active_release(&ce->active_tracker);
 err:
 	ce->pin_count = 0;
 	return ERR_PTR(err);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index b816b9be7245..67736e31826c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -424,8 +424,6 @@ void intel_engines_sanitize(struct drm_i915_private *i915, bool force);
 bool intel_engine_is_idle(struct intel_engine_cs *engine);
 bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
 
-void intel_engine_lost_context(struct intel_engine_cs *engine);
-
 void intel_engines_park(struct drm_i915_private *i915);
 void intel_engines_unpark(struct drm_i915_private *i915);
 
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index 8f72d26c58fe..9555042a4eb7 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -287,14 +287,9 @@ void mock_engine_free(struct intel_engine_cs *engine)
 {
 	struct mock_engine *mock =
 		container_of(engine, typeof(*mock), base);
-	struct intel_context *ce;
 
 	GEM_BUG_ON(timer_pending(&mock->hw_delay));
 
-	ce = fetch_and_zero(&engine->last_retired_context);
-	if (ce)
-		intel_context_unpin(ce);
-
 	__intel_context_unpin(engine->i915->kernel_context, engine);
 
 	intel_engine_fini_breadcrumbs(engine);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index d00679e21415..be5e871be26b 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -55,7 +55,6 @@ static void mock_device_release(struct drm_device *dev)
 
 	mutex_lock(&i915->drm.struct_mutex);
 	mock_device_flush(i915);
-	i915_gem_contexts_lost(i915);
 	mutex_unlock(&i915->drm.struct_mutex);
 
 	drain_delayed_work(&i915->gt.retire_work);
-- 
2.20.1



More information about the Intel-gfx-trybot mailing list