[PATCH 46/59] drm/i915: Avoid presumption of execution ordering for kernel context switching

From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Sat, 29 Dec 2018 13:26:15 UTC


For future GuC implementations, the execution order of individual
requests will be opaque. As such, we will not have a single execution
timeline and will not know the last request/context to be run on each
engine. The major consequence of this is that we do not know which
contexts are still volatile on the HW and which have been saved and can
be swapped out. The only point at which we can know is after a
synchronous switch to the kernel context, which we perform on idling.
So we must keep each context pinned from its first use until the next
time we idle the GPU. One consequence of this is that context eviction
requires switching to the kernel context and idling!
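
To make the lifecycle concrete, here is a small userspace model of the
pin-until-park scheme (illustration only, not part of the patch; the
names only loosely mirror pin_active_context() and intel_contexts_park()
added below):

	#include <stdio.h>
	#include <stdbool.h>
	#include <stddef.h>

	/* Toy model: a context takes an extra pin on its first use and
	 * only drops it when the GPU is parked, i.e. after a synchronous
	 * switch to the kernel context has saved every context image.
	 */
	struct toy_context {
		const char *name;
		int pin_count;
		bool on_active_list;
		struct toy_context *next_active;
	};

	static struct toy_context *active_contexts;

	/* Called on request submission: first use since the last park
	 * takes a pin that lasts until the next park. */
	static void pin_active_context(struct toy_context *ce)
	{
		if (!ce->on_active_list) {
			ce->on_active_list = true;
			ce->next_active = active_contexts;
			active_contexts = ce;
			ce->pin_count++; /* keep the context resident */
		}
	}

	/* Called on parking, only after switching to the kernel context
	 * and idling, so every context has been saved to memory. */
	static void contexts_park(void)
	{
		struct toy_context *ce = active_contexts;

		while (ce) {
			struct toy_context *next = ce->next_active;

			ce->on_active_list = false;
			ce->pin_count--; /* saved, safe to unpin/evict */
			ce = next;
		}
		active_contexts = NULL;
	}

	int main(void)
	{
		struct toy_context a = { .name = "ctx-a" };
		struct toy_context b = { .name = "ctx-b" };

		pin_active_context(&a); /* first request on ctx-a */
		pin_active_context(&a); /* further requests: no extra pin */
		pin_active_context(&b);
		printf("busy:   a=%d b=%d\n", a.pin_count, b.pin_count);

		/* ... switch to kernel context, wait for idle ... */
		contexts_park();
		printf("parked: a=%d b=%d\n", a.pin_count, b.pin_count);
		return 0;
	}

The real driver code additionally has to hand over the per-request pin
and cope with wedging, which the sketch above deliberately omits.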

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.h               |   1 +
 drivers/gpu/drm/i915/i915_gem.c               | 149 ++++++------------
 drivers/gpu/drm/i915/i915_gem_context.c       | 123 ++++++---------
 drivers/gpu/drm/i915/i915_gem_context.h       |  15 +-
 drivers/gpu/drm/i915/i915_gem_evict.c         |  32 +---
 drivers/gpu/drm/i915/i915_request.c           |  40 ++---
 drivers/gpu/drm/i915/i915_request.h           |   1 +
 drivers/gpu/drm/i915/intel_engine_cs.c        |  60 +------
 drivers/gpu/drm/i915/intel_ringbuffer.h       |  14 --
 drivers/gpu/drm/i915/selftests/i915_gem.c     |   4 +-
 .../gpu/drm/i915/selftests/i915_gem_context.c |  53 ++-----
 .../gpu/drm/i915/selftests/i915_gem_evict.c   | 148 -----------------
 .../gpu/drm/i915/selftests/igt_flush_test.c   |   2 +-
 .../gpu/drm/i915/selftests/intel_hangcheck.c  |   2 +-
 drivers/gpu/drm/i915/selftests/intel_lrc.c    |   8 +-
 .../drm/i915/selftests/intel_workarounds.c    |   2 +-
 drivers/gpu/drm/i915/selftests/mock_context.c |   1 +
 drivers/gpu/drm/i915/selftests/mock_engine.c  |  11 +-
 .../gpu/drm/i915/selftests/mock_gem_device.c  |  13 +-
 19 files changed, 187 insertions(+), 492 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 05b8b4cfd0eb..30d5866cc747 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1962,6 +1962,7 @@ struct drm_i915_private {
 		struct i915_vma *timeline_hwsp;
 		u64 timeline_free;
 
+		struct list_head active_contexts;
 		struct list_head active_rings;
 		struct list_head closed_vma;
 		u32 active_requests;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index bade8b0f88f0..4c77bb4f06d2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -169,6 +169,7 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)
 	synchronize_irq(i915->drm.irq);
 
 	intel_engines_park(i915);
+	intel_contexts_park(i915);
 	i915_timelines_park(i915);
 
 	i915_pmu_gt_parked(i915);
@@ -3137,43 +3138,23 @@ static void __sleep_rcu(struct rcu_head *rcu)
 	}
 }
 
-static inline bool
-new_requests_since_last_retire(const struct drm_i915_private *i915)
-{
-	return (READ_ONCE(i915->gt.active_requests) ||
-		work_pending(&i915->gt.idle_work.work));
-}
-
-static void assert_kernel_context_is_current(struct drm_i915_private *i915)
-{
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-
-	if (i915_terminally_wedged(&i915->gpu_error))
-		return;
-
-	GEM_BUG_ON(i915->gt.active_requests);
-	for_each_engine(engine, i915, id) {
-		GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline.last_request));
-		GEM_BUG_ON(engine->last_retired_context !=
-			   to_intel_context(i915->kernel_context, engine));
-	}
-}
-
 static void
 i915_gem_idle_work_handler(struct work_struct *work)
 {
-	struct drm_i915_private *dev_priv =
-		container_of(work, typeof(*dev_priv), gt.idle_work.work);
+	struct drm_i915_private *i915 =
+		container_of(work, typeof(*i915), gt.idle_work.work);
 	unsigned int epoch = I915_EPOCH_INVALID;
 	bool rearm_hangcheck;
 
-	if (!READ_ONCE(dev_priv->gt.awake))
+	if (!READ_ONCE(i915->gt.awake))
 		return;
 
-	if (READ_ONCE(dev_priv->gt.active_requests))
+	if (READ_ONCE(i915->gt.active_requests))
 		return;
 
+	rearm_hangcheck =
+		cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
+
 	/*
 	 * Flush out the last user context, leaving only the pinned
 	 * kernel context resident. When we are idling on the kernel_context,
@@ -3182,56 +3163,40 @@ i915_gem_idle_work_handler(struct work_struct *work)
 	 * always called at least twice before idling (and if the system is
 	 * idle that implies a round trip through the retire worker).
 	 */
-	mutex_lock(&dev_priv->drm.struct_mutex);
-	i915_gem_switch_to_kernel_context(dev_priv);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
-
-	GEM_TRACE("active_requests=%d (after switch-to-kernel-context)\n",
-		  READ_ONCE(dev_priv->gt.active_requests));
-
-	/*
-	 * Wait for last execlists context complete, but bail out in case a
-	 * new request is submitted. As we don't trust the hardware, we
-	 * continue on if the wait times out. This is necessary to allow
-	 * the machine to suspend even if the hardware dies, and we will
-	 * try to recover in resume (after depriving the hardware of power,
-	 * it may be in a better mmod).
-	 */
-	__wait_for(if (new_requests_since_last_retire(dev_priv)) return,
-		   intel_engines_are_idle(dev_priv),
-		   I915_IDLE_ENGINES_TIMEOUT * 1000,
-		   10, 500);
-
-	rearm_hangcheck =
-		cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
-
-	if (!mutex_trylock(&dev_priv->drm.struct_mutex)) {
+	if (!mutex_trylock(&i915->drm.struct_mutex)) {
 		/* Currently busy, come back later */
-		mod_delayed_work(dev_priv->wq,
-				 &dev_priv->gt.idle_work,
+		mod_delayed_work(i915->wq,
+				 &i915->gt.idle_work,
 				 msecs_to_jiffies(50));
 		goto out_rearm;
 	}
 
-	/*
-	 * New request retired after this work handler started, extend active
-	 * period until next instance of the work.
-	 */
-	if (new_requests_since_last_retire(dev_priv))
-		goto out_unlock;
+	if (!i915->gt.active_requests &&
+	    !work_pending(&i915->gt.idle_work.work)) {
+		++i915->gt.active_requests; /* don't requeue idle! */
 
-	epoch = __i915_gem_park(dev_priv);
+		if (i915_gem_switch_to_kernel_context_sync(i915,
+							   I915_WAIT_LOCKED,
+							   HZ / 10)) {
+			dev_err(i915->drm.dev,
+				"Failed to idle engines, declaring wedged!\n");
+			GEM_TRACE_DUMP();
+			i915_gem_set_wedged(i915);
+			i915_retire_requests(i915);
+		}
 
-	assert_kernel_context_is_current(dev_priv);
+		if (!--i915->gt.active_requests) {
+			epoch = __i915_gem_park(i915);
+			rearm_hangcheck = false;
+		}
+	}
 
-	rearm_hangcheck = false;
-out_unlock:
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&i915->drm.struct_mutex);
 
 out_rearm:
 	if (rearm_hangcheck) {
-		GEM_BUG_ON(!dev_priv->gt.awake);
-		i915_queue_hangcheck(dev_priv);
+		GEM_BUG_ON(!i915->gt.awake);
+		i915_queue_hangcheck(i915);
 	}
 
 	/*
@@ -3242,11 +3207,11 @@ i915_gem_idle_work_handler(struct work_struct *work)
 	 * period, and then queue a task (that will run last on the wq) to
 	 * shrink and re-optimize the caches.
 	 */
-	if (same_epoch(dev_priv, epoch)) {
+	if (same_epoch(i915, epoch)) {
 		struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL);
 		if (s) {
 			init_rcu_head(&s->rcu);
-			s->i915 = dev_priv;
+			s->i915 = i915;
 			s->epoch = epoch;
 			call_rcu(&s->rcu, __sleep_rcu);
 		}
@@ -3451,6 +3416,9 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915,
 
 		i915_retire_requests(i915);
 		GEM_BUG_ON(i915->gt.active_requests);
+
+		if (flags & I915_WAIT_FOR_IDLE_PARK)
+			__i915_gem_park(i915);
 	}
 
 	return 0;
@@ -4656,10 +4624,6 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
 
 	intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
 	intel_runtime_pm_put(i915, wakeref);
-
-	mutex_lock(&i915->drm.struct_mutex);
-	i915_gem_contexts_lost(i915);
-	mutex_unlock(&i915->drm.struct_mutex);
 }
 
 int i915_gem_suspend(struct drm_i915_private *i915)
@@ -4686,15 +4650,11 @@ int i915_gem_suspend(struct drm_i915_private *i915)
 	 * not rely on its state.
 	 */
 	if (!i915_terminally_wedged(&i915->gpu_error)) {
-		ret = i915_gem_switch_to_kernel_context(i915);
-		if (ret)
-			goto err_unlock;
-
-		ret = i915_gem_wait_for_idle(i915,
-					     I915_WAIT_INTERRUPTIBLE |
-					     I915_WAIT_LOCKED |
-					     I915_WAIT_FOR_IDLE_BOOST,
-					     HZ / 10);
+		ret = i915_gem_switch_to_kernel_context_sync(i915,
+							     I915_WAIT_LOCKED |
+							     I915_WAIT_INTERRUPTIBLE |
+							     I915_WAIT_FOR_IDLE_BOOST,
+							     HZ / 10);
 		if (ret == -EINTR)
 			goto err_unlock;
 
@@ -4794,7 +4754,9 @@ void i915_gem_resume(struct drm_i915_private *i915)
 	intel_uc_resume(i915);
 
 	/* Always reload a context for powersaving. */
-	if (i915_gem_switch_to_kernel_context(i915))
+	if (i915_gem_switch_to_kernel_context_sync(i915,
+						   I915_WAIT_LOCKED,
+						   MAX_SCHEDULE_TIMEOUT))
 		goto err_wedged;
 
 out_unlock:
@@ -4990,18 +4952,14 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
 			goto err_active;
 	}
 
-	err = i915_gem_switch_to_kernel_context(i915);
-	if (err)
-		goto err_active;
-
-	if (i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED, HZ / 5)) {
+	if (i915_gem_switch_to_kernel_context_sync(i915,
+						   I915_WAIT_LOCKED,
+						   HZ / 5)) {
 		i915_gem_set_wedged(i915);
 		err = -EIO; /* Caller will declare us wedged */
 		goto err_active;
 	}
 
-	assert_kernel_context_is_current(i915);
-
 	/*
 	 * Immediately park the GPU so that we enable powersaving and
 	 * treat it as idle. The next time we issue a request, we will
@@ -5079,15 +5037,11 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
 	 * request, ensure we are pointing at the kernel context and
 	 * then remove it.
 	 */
-	if (WARN_ON(i915_gem_switch_to_kernel_context(i915)))
-		goto out_ctx;
-
-	if (WARN_ON(i915_gem_wait_for_idle(i915,
-					   I915_WAIT_LOCKED,
-					   MAX_SCHEDULE_TIMEOUT)))
-		goto out_ctx;
+	if (i915_gem_switch_to_kernel_context_sync(i915,
+						   I915_WAIT_LOCKED,
+						   HZ / 10))
+		i915_gem_set_wedged(i915);
 
-	i915_gem_contexts_lost(i915);
 	goto out_ctx;
 }
 
@@ -5428,6 +5382,7 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
 
 	i915_timelines_init(dev_priv);
 
+	INIT_LIST_HEAD(&dev_priv->gt.active_contexts);
 	INIT_LIST_HEAD(&dev_priv->gt.active_rings);
 	INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 4714568bce97..5920611ab17d 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -343,6 +343,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
 		struct intel_context *ce = &ctx->__engine[n];
 
 		ce->gem_context = ctx;
+		INIT_LIST_HEAD(&ce->active_link);
 		INIT_LIST_HEAD(&ce->signal_link);
 		INIT_LIST_HEAD(&ce->signals);
 	}
@@ -577,17 +578,6 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
 	return 0;
 }
 
-void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
-{
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-
-	lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
-	for_each_engine(engine, dev_priv, id)
-		intel_engine_lost_context(engine);
-}
-
 void i915_gem_contexts_fini(struct drm_i915_private *i915)
 {
 	lockdep_assert_held(&i915->drm.struct_mutex);
@@ -661,84 +651,37 @@ last_request_on_engine(struct i915_timeline *timeline,
 	return NULL;
 }
 
-static bool engine_has_kernel_context_barrier(struct intel_engine_cs *engine)
-{
-	struct drm_i915_private *i915 = engine->i915;
-	const struct intel_context * const ce =
-		to_intel_context(i915->kernel_context, engine);
-	struct i915_timeline *barrier = ce->ring->timeline;
-	struct intel_ring *ring;
-	bool any_active = false;
-
-	lockdep_assert_held(&i915->drm.struct_mutex);
-	list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
-		struct i915_request *rq;
-
-		rq = last_request_on_engine(ring->timeline, engine);
-		if (!rq)
-			continue;
-
-		any_active = true;
-
-		if (rq->hw_context == ce)
-			continue;
-
-		/*
-		 * Was this request submitted after the previous
-		 * switch-to-kernel-context?
-		 */
-		if (!i915_timeline_sync_is_later(barrier, &rq->fence)) {
-			GEM_TRACE("%s needs barrier for %llx:%lld\n",
-				  ring->timeline->name,
-				  rq->fence.context,
-				  rq->fence.seqno);
-			return false;
-		}
-
-		GEM_TRACE("%s has barrier after %llx:%lld\n",
-			  ring->timeline->name,
-			  rq->fence.context,
-			  rq->fence.seqno);
-	}
-
-	/*
-	 * If any other timeline was still active and behind the last barrier,
-	 * then our last switch-to-kernel-context must still be queued and
-	 * will run last (leaving the engine in the kernel context when it
-	 * eventually idles).
-	 */
-	if (any_active)
-		return true;
-
-	/* The engine is idle; check that it is idling in the kernel context. */
-	return engine->last_retired_context == ce;
-}
-
-int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
+int i915_gem_switch_to_kernel_context_sync(struct drm_i915_private *i915,
+					   unsigned int flags,
+					   unsigned long timeout)
 {
+	struct i915_request *wait[I915_NUM_ENGINES] = {};
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
-
-	GEM_TRACE("awake?=%s\n", yesno(i915->gt.awake));
+	int err = 0;
 
 	lockdep_assert_held(&i915->drm.struct_mutex);
 	GEM_BUG_ON(!i915->kernel_context);
+	GEM_BUG_ON(!(flags & I915_WAIT_LOCKED));
 
-	i915_retire_requests(i915);
+	/* Inoperable, so presume the GPU is pointing into the void! */
+	if (i915_terminally_wedged(&i915->gpu_error))
+		return 0;
 
 	for_each_engine(engine, i915, id) {
 		struct intel_ring *ring;
 		struct i915_request *rq;
 
-		GEM_BUG_ON(!to_intel_context(i915->kernel_context, engine));
-		if (engine_has_kernel_context_barrier(engine))
+		if (!engine->context_size)
 			continue;
 
-		GEM_TRACE("emit barrier on %s\n", engine->name);
+		GEM_BUG_ON(!to_intel_context(i915->kernel_context, engine));
 
 		rq = i915_request_alloc(engine, i915->kernel_context);
-		if (IS_ERR(rq))
-			return PTR_ERR(rq);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			break;
+		}
 
 		/* Queue this switch after all other activity */
 		list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
@@ -761,10 +704,28 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
 			i915_timeline_sync_set(rq->timeline, &prev->fence);
 		}
 
+		if (flags & I915_WAIT_FOR_IDLE_BOOST)
+			gen6_rps_boost(rq, NULL);
+
+		wait[id] = i915_request_get(rq);
 		i915_request_add(rq);
 	}
 
-	return 0;
+	for_each_engine(engine, i915, id) {
+		if (!wait[id])
+			continue;
+
+		if (err == 0) {
+			timeout = i915_request_wait(wait[id], flags, timeout);
+			if (timeout < 0)
+				err = timeout;
+		}
+
+		i915_request_put(wait[id]);
+	}
+
+	i915_retire_requests(i915);
+	return err;
 }
 
 static bool client_is_banned(struct drm_i915_file_private *file_priv)
@@ -1025,6 +986,20 @@ int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
 	return err;
 }
 
+void intel_contexts_park(struct drm_i915_private *i915)
+{
+	struct intel_context *ce, *cn;
+
+	lockdep_assert_held(&i915->drm.struct_mutex);
+
+	list_for_each_entry_safe(ce, cn,
+				 &i915->gt.active_contexts, active_link) {
+		INIT_LIST_HEAD(&ce->active_link);
+		intel_context_unpin(ce);
+	}
+	INIT_LIST_HEAD(&i915->gt.active_contexts);
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/mock_context.c"
 #include "selftests/i915_gem_context.c"
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index e5eca29cd373..407f3eed632f 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -164,6 +164,7 @@ struct i915_gem_context {
 	struct intel_context {
 		struct i915_gem_context *gem_context;
 		struct intel_engine_cs *active;
+		struct list_head active_link;
 		struct list_head signal_link;
 		struct list_head signals;
 		struct i915_vma *state;
@@ -314,6 +315,12 @@ static inline void __intel_context_pin(struct intel_context *ce)
 	ce->pin_count++;
 }
 
+static inline void __intel_context_unpin(struct intel_context *ce)
+{
+	GEM_BUG_ON(!ce->pin_count);
+	ce->pin_count--;
+}
+
 static inline void intel_context_unpin(struct intel_context *ce)
 {
 	GEM_BUG_ON(!ce->pin_count);
@@ -324,17 +331,19 @@ static inline void intel_context_unpin(struct intel_context *ce)
 	ce->ops->unpin(ce);
 }
 
+void intel_contexts_park(struct drm_i915_private *i915);
+
 /* i915_gem_context.c */
 int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
-void i915_gem_contexts_lost(struct drm_i915_private *dev_priv);
 void i915_gem_contexts_fini(struct drm_i915_private *dev_priv);
 
 int i915_gem_context_open(struct drm_i915_private *i915,
 			  struct drm_file *file);
 void i915_gem_context_close(struct drm_file *file);
 
-int i915_switch_context(struct i915_request *rq);
-int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
+int i915_gem_switch_to_kernel_context_sync(struct drm_i915_private *i915,
+					   unsigned int flags,
+					   unsigned long timeout);
 
 void i915_gem_context_release(struct kref *ctx_ref);
 struct i915_gem_context *
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 8ccde5761c2c..16248ba20485 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -33,24 +33,9 @@
 #include "intel_drv.h"
 #include "i915_trace.h"
 
-I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
-	bool fail_if_busy:1;
-} igt_evict_ctl;)
-
 static bool ggtt_is_idle(struct drm_i915_private *i915)
 {
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
-
-       if (i915->gt.active_requests)
-	       return false;
-
-       for_each_engine(engine, i915, id) {
-	       if (!intel_engine_has_kernel_context(engine))
-		       return false;
-       }
-
-       return true;
+	return !i915->gt.active_requests;
 }
 
 static int ggtt_flush(struct drm_i915_private *i915)
@@ -63,14 +48,10 @@ static int ggtt_flush(struct drm_i915_private *i915)
 	 * the hopes that we can then remove contexts and the like only
 	 * bound by their active reference.
 	 */
-	err = i915_gem_switch_to_kernel_context(i915);
-	if (err)
-		return err;
-
-	err = i915_gem_wait_for_idle(i915,
-				     I915_WAIT_INTERRUPTIBLE |
-				     I915_WAIT_LOCKED,
-				     MAX_SCHEDULE_TIMEOUT);
+	err = i915_gem_switch_to_kernel_context_sync(i915,
+						     I915_WAIT_LOCKED |
+						     I915_WAIT_INTERRUPTIBLE,
+						     MAX_SCHEDULE_TIMEOUT);
 	if (err)
 		return err;
 
@@ -224,9 +205,6 @@ i915_gem_evict_something(struct i915_address_space *vm,
 	 * the kernel's there is no more we can evict.
 	 */
 	if (!ggtt_is_idle(dev_priv)) {
-		if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
-			return -EBUSY;
-
 		ret = ggtt_flush(dev_priv);
 		if (ret)
 			return ret;
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 0bcf7c024ebd..894d2f91bb1b 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -210,18 +210,6 @@ static void __retire_engine_request(struct intel_engine_cs *engine,
 	spin_unlock(&rq->lock);
 
 	local_irq_enable();
-
-	/*
-	 * The backing object for the context is done after switching to the
-	 * *next* context. Therefore we cannot retire the previous context until
-	 * the next context has already started running. However, since we
-	 * cannot take the required locks at i915_request_submit() we
-	 * defer the unpinning of the active context to now, retirement of
-	 * the subsequent request.
-	 */
-	if (engine->last_retired_context)
-		intel_context_unpin(engine->last_retired_context);
-	engine->last_retired_context = rq->hw_context;
 }
 
 static void __retire_engine_upto(struct intel_engine_cs *engine,
@@ -294,7 +282,6 @@ static void i915_request_retire(struct i915_request *request)
 
 	/* Retirement decays the ban score as it is a sign of ctx progress */
 	atomic_dec_if_positive(&request->gem_context->ban_score);
-	intel_context_unpin(request->hw_context);
 
 	__retire_engine_upto(request->engine, request);
 
@@ -523,8 +510,15 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	 * ourselves.
 	 */
 	ce = intel_context_pin(ctx, engine);
-	if (IS_ERR(ce))
-		return ERR_CAST(ce);
+	if (IS_ERR(ce)) {
+		i915_gem_wait_for_idle(i915,
+				       I915_WAIT_LOCKED |
+				       I915_WAIT_FOR_IDLE_PARK,
+				       MAX_SCHEDULE_TIMEOUT);
+		ce = intel_context_pin(ctx, engine);
+		if (IS_ERR(ce))
+			return ERR_CAST(ce);
+	}
 
 	reserve_gt(i915);
 
@@ -638,9 +632,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	if (ret)
 		goto err_unwind;
 
-	/* Keep a second pin for the dual retirement along engine and ring */
-	__intel_context_pin(ce);
-
 	rq->infix = rq->ring->emit; /* end of header; start of user payload */
 
 	/* Check that we didn't interrupt ourselves with a new request */
@@ -835,6 +826,17 @@ void i915_request_skip(struct i915_request *rq, int error)
 	memset(vaddr + head, 0, rq->postfix - head);
 }
 
+static void pin_active_context(struct drm_i915_private *i915,
+			       struct intel_context *ce)
+{
+	if (unlikely(list_empty(&ce->active_link))) {
+		list_add(&ce->active_link, &i915->gt.active_contexts);
+		__intel_context_pin(ce);
+	}
+	__intel_context_unpin(ce);
+	GEM_BUG_ON(!ce->pin_count);
+}
+
 /*
  * NB: This function is not allowed to fail. Doing so would mean the the
  * request is not being tracked for completion but the work itself is
@@ -910,6 +912,8 @@ void i915_request_add(struct i915_request *request)
 		GEM_TRACE("marking %s as active\n", ring->timeline->name);
 		list_add(&ring->active_link, &request->i915->gt.active_rings);
 	}
+	pin_active_context(request->i915, request->hw_context);
+
 	request->emitted_jiffies = jiffies;
 
 	/*
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index f7741a35f20b..3c7b25a2d004 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -273,6 +273,7 @@ long i915_request_wait(struct i915_request *rq,
 #define I915_WAIT_PRIORITY	BIT(2) /* small priority bump for the request */
 #define I915_WAIT_ALL		BIT(3) /* used by i915_gem_object_wait() */
 #define I915_WAIT_FOR_IDLE_BOOST BIT(4)
+#define I915_WAIT_FOR_IDLE_PARK BIT(5)
 
 /**
  * Returns true if seq1 is later than seq2.
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 5b13d31c3954..08dd17559dcb 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -597,8 +597,8 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
 	return 0;
 }
 
-static void __intel_context_unpin(struct i915_gem_context *ctx,
-				  struct intel_engine_cs *engine)
+static void context_unpin(struct i915_gem_context *ctx,
+			  struct intel_engine_cs *engine)
 {
 	intel_context_unpin(to_intel_context(ctx, engine));
 }
@@ -659,10 +659,10 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
 err_breadcrumbs:
 	intel_engine_fini_breadcrumbs(engine);
 	if (i915->preempt_context)
-		__intel_context_unpin(i915->preempt_context, engine);
+		context_unpin(i915->preempt_context, engine);
 
 err_unpin_kernel:
-	__intel_context_unpin(i915->kernel_context, engine);
+	context_unpin(i915->kernel_context, engine);
 	return ret;
 }
 
@@ -687,8 +687,8 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 		i915_gem_object_put(engine->default_state);
 
 	if (i915->preempt_context)
-		__intel_context_unpin(i915->preempt_context, engine);
-	__intel_context_unpin(i915->kernel_context, engine);
+		context_unpin(i915->preempt_context, engine);
+	context_unpin(i915->kernel_context, engine);
 
 	i915_timeline_fini(&engine->timeline);
 
@@ -1002,34 +1002,6 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
 	return true;
 }
 
-/**
- * intel_engine_has_kernel_context:
- * @engine: the engine
- *
- * Returns true if the last context to be executed on this engine, or has been
- * executed if the engine is already idle, is the kernel context
- * (#i915.kernel_context).
- */
-bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
-{
-	const struct intel_context *kernel_context =
-		to_intel_context(engine->i915->kernel_context, engine);
-	struct i915_request *rq;
-
-	lockdep_assert_held(&engine->i915->drm.struct_mutex);
-
-	/*
-	 * Check the last context seen by the engine. If active, it will be
-	 * the last request that remains in the timeline. When idle, it is
-	 * the last executed context as tracked by retirement.
-	 */
-	rq = __i915_gem_active_peek(&engine->timeline.last_request);
-	if (rq)
-		return rq->hw_context == kernel_context;
-	else
-		return engine->last_retired_context == kernel_context;
-}
-
 void intel_engines_reset_default_submission(struct drm_i915_private *i915)
 {
 	struct intel_engine_cs *engine;
@@ -1148,26 +1120,6 @@ void intel_engines_unpark(struct drm_i915_private *i915)
 	}
 }
 
-/**
- * intel_engine_lost_context: called when the GPU is reset into unknown state
- * @engine: the engine
- *
- * We have either reset the GPU or otherwise about to lose state tracking of
- * the current GPU logical state (e.g. suspend). On next use, it is therefore
- * imperative that we make no presumptions about the current state and load
- * from scratch.
- */
-void intel_engine_lost_context(struct intel_engine_cs *engine)
-{
-	struct intel_context *ce;
-
-	lockdep_assert_held(&engine->i915->drm.struct_mutex);
-
-	ce = fetch_and_zero(&engine->last_retired_context);
-	if (ce)
-		intel_context_unpin(ce);
-}
-
 bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
 {
 	switch (INTEL_GEN(engine->i915)) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 91e833d6a697..63ec13f10663 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -450,17 +450,6 @@ struct intel_engine_cs {
 
 	struct intel_engine_execlists execlists;
 
-	/* Contexts are pinned whilst they are active on the GPU. The last
-	 * context executed remains active whilst the GPU is idle - the
-	 * switch away and write to the context object only occurs on the
-	 * next execution.  Contexts are only unpinned on retirement of the
-	 * following request ensuring that we can always write to the object
-	 * on the context switch even after idling. Across suspend, we switch
-	 * to the kernel context and trash it as the save may not happen
-	 * before the hardware is powered down.
-	 */
-	struct intel_context *last_retired_context;
-
 	/* status_notifier: list of callbacks for context-switch changes */
 	struct atomic_notifier_head context_status_notifier;
 
@@ -922,9 +911,6 @@ void intel_engines_sanitize(struct drm_i915_private *i915, bool force);
 bool intel_engine_is_idle(struct intel_engine_cs *engine);
 bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
 
-bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);
-void intel_engine_lost_context(struct intel_engine_cs *engine);
-
 void intel_engines_park(struct drm_i915_private *i915);
 void intel_engines_unpark(struct drm_i915_private *i915);
 
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index e77b7ed449ae..378ad0adf2c6 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -164,7 +164,7 @@ static int igt_gem_suspend(void *arg)
 
 	mutex_lock(&i915->drm.struct_mutex);
 	err = switch_to_context(i915, ctx);
-	if (igt_flush_test(i915, I915_WAIT_LOCKED))
+	if (igt_flush_test(i915, I915_WAIT_LOCKED | I915_WAIT_FOR_IDLE_PARK))
 		err = -EIO;
 	mutex_unlock(&i915->drm.struct_mutex);
 out:
@@ -205,7 +205,7 @@ static int igt_gem_hibernate(void *arg)
 
 	mutex_lock(&i915->drm.struct_mutex);
 	err = switch_to_context(i915, ctx);
-	if (igt_flush_test(i915, I915_WAIT_LOCKED))
+	if (igt_flush_test(i915, I915_WAIT_LOCKED | I915_WAIT_FOR_IDLE_PARK))
 		err = -EIO;
 	mutex_unlock(&i915->drm.struct_mutex);
 out:
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index f5926aa23e9d..6ca99dda0bcd 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -1104,7 +1104,7 @@ static int __igt_switch_to_kernel_context(struct drm_i915_private *i915,
 {
 	struct intel_engine_cs *engine;
 	unsigned int tmp;
-	int err;
+	int pass;
 
 	GEM_TRACE("Testing %s\n", __engine_name(i915, engines));
 	for_each_engine_masked(engine, i915, engines, tmp) {
@@ -1117,47 +1117,18 @@ static int __igt_switch_to_kernel_context(struct drm_i915_private *i915,
 		i915_request_add(rq);
 	}
 
-	err = i915_gem_switch_to_kernel_context(i915);
-	if (err)
-		return err;
-
-	for_each_engine_masked(engine, i915, engines, tmp) {
-		if (!engine_has_kernel_context_barrier(engine)) {
-			pr_err("kernel context not last on engine %s!\n",
-			       engine->name);
-			return -EINVAL;
-		}
-	}
-
-	err = i915_gem_wait_for_idle(i915,
-				     I915_WAIT_LOCKED,
-				     MAX_SCHEDULE_TIMEOUT);
-	if (err)
-		return err;
-
-	GEM_BUG_ON(i915->gt.active_requests);
-	for_each_engine_masked(engine, i915, engines, tmp) {
-		if (engine->last_retired_context->gem_context != i915->kernel_context) {
-			pr_err("engine %s not idling in kernel context!\n",
-			       engine->name);
-			return -EINVAL;
-		}
-	}
-
-	err = i915_gem_switch_to_kernel_context(i915);
-	if (err)
-		return err;
+	for (pass = 0; pass < 2; pass++) { /* Once busy; once idle */
+		int err;
 
-	if (i915->gt.active_requests) {
-		pr_err("switch-to-kernel-context emitted %d requests even though it should already be idling in the kernel context\n",
-		       i915->gt.active_requests);
-		return -EINVAL;
-	}
+		err = i915_gem_switch_to_kernel_context_sync(i915,
+							     I915_WAIT_LOCKED,
+							     MAX_SCHEDULE_TIMEOUT);
+		if (err)
+			return err;
 
-	for_each_engine_masked(engine, i915, engines, tmp) {
-		if (!intel_engine_has_kernel_context(engine)) {
-			pr_err("kernel context not last on engine %s!\n",
-			       engine->name);
+		if (i915->gt.active_requests) {
+			pr_err("%d active requests remain after switching to kernel context\n",
+			       i915->gt.active_requests);
 			return -EINVAL;
 		}
 	}
@@ -1204,7 +1175,7 @@ static int igt_switch_to_kernel_context(void *arg)
 
 out_unlock:
 	GEM_TRACE_DUMP_ON(err);
-	if (igt_flush_test(i915, I915_WAIT_LOCKED))
+	if (igt_flush_test(i915, I915_WAIT_LOCKED | I915_WAIT_FOR_IDLE_PARK))
 		err = -EIO;
 
 	intel_runtime_pm_put(i915, wakeref);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 337aa96908a8..4b3b933c9dc4 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -368,153 +368,6 @@ static int igt_evict_vm(void *arg)
 	return err;
 }
 
-static int igt_evict_contexts(void *arg)
-{
-	const u64 PRETEND_GGTT_SIZE = 16ull << 20;
-	struct drm_i915_private *i915 = arg;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	struct reserved {
-		struct drm_mm_node node;
-		struct reserved *next;
-	} *reserved = NULL;
-	intel_wakeref_t wakeref;
-	struct drm_mm_node hole;
-	unsigned long count;
-	int err;
-
-	/*
-	 * The purpose of this test is to verify that we will trigger an
-	 * eviction in the GGTT when constructing a request that requires
-	 * additional space in the GGTT for pinning the context. This space
-	 * is not directly tied to the request so reclaiming it requires
-	 * extra work.
-	 *
-	 * As such this test is only meaningful for full-ppgtt environments
-	 * where the GTT space of the request is separate from the GGTT
-	 * allocation required to build the request.
-	 */
-	if (!HAS_FULL_PPGTT(i915))
-		return 0;
-
-	mutex_lock(&i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(i915);
-
-	/* Reserve a block so that we know we have enough to fit a few rq */
-	memset(&hole, 0, sizeof(hole));
-	err = i915_gem_gtt_insert(&i915->ggtt.vm, &hole,
-				  PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE,
-				  0, i915->ggtt.vm.total,
-				  PIN_NOEVICT);
-	if (err)
-		goto out_locked;
-
-	/* Make the GGTT appear small by filling it with unevictable nodes */
-	count = 0;
-	do {
-		struct reserved *r;
-
-		r = kcalloc(1, sizeof(*r), GFP_KERNEL);
-		if (!r) {
-			err = -ENOMEM;
-			goto out_locked;
-		}
-
-		if (i915_gem_gtt_insert(&i915->ggtt.vm, &r->node,
-					1ul << 20, 0, I915_COLOR_UNEVICTABLE,
-					0, i915->ggtt.vm.total,
-					PIN_NOEVICT)) {
-			kfree(r);
-			break;
-		}
-
-		r->next = reserved;
-		reserved = r;
-
-		count++;
-	} while (1);
-	drm_mm_remove_node(&hole);
-	mutex_unlock(&i915->drm.struct_mutex);
-	pr_info("Filled GGTT with %lu 1MiB nodes\n", count);
-
-	/* Overfill the GGTT with context objects and so try to evict one. */
-	for_each_engine(engine, i915, id) {
-		struct i915_sw_fence fence;
-		struct drm_file *file;
-
-		file = mock_file(i915);
-		if (IS_ERR(file)) {
-			err = PTR_ERR(file);
-			break;
-		}
-
-		count = 0;
-		mutex_lock(&i915->drm.struct_mutex);
-		onstack_fence_init(&fence);
-		do {
-			struct i915_request *rq;
-			struct i915_gem_context *ctx;
-
-			ctx = live_context(i915, file);
-			if (!ctx)
-				break;
-
-			/* We will need some GGTT space for the rq's context */
-			igt_evict_ctl.fail_if_busy = true;
-			rq = i915_request_alloc(engine, ctx);
-			igt_evict_ctl.fail_if_busy = false;
-
-			if (IS_ERR(rq)) {
-				/* When full, fail_if_busy will trigger EBUSY */
-				if (PTR_ERR(rq) != -EBUSY) {
-					pr_err("Unexpected error from request alloc (ctx hw id %u, on %s): %d\n",
-					       ctx->hw_id, engine->name,
-					       (int)PTR_ERR(rq));
-					err = PTR_ERR(rq);
-				}
-				break;
-			}
-
-			/* Keep every request/ctx pinned until we are full */
-			err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
-							       &fence,
-							       GFP_KERNEL);
-			if (err < 0)
-				break;
-
-			i915_request_add(rq);
-			count++;
-			err = 0;
-		} while(1);
-		mutex_unlock(&i915->drm.struct_mutex);
-
-		onstack_fence_fini(&fence);
-		pr_info("Submitted %lu contexts/requests on %s\n",
-			count, engine->name);
-
-		mock_file_free(i915, file);
-		if (err)
-			break;
-	}
-
-	mutex_lock(&i915->drm.struct_mutex);
-out_locked:
-	while (reserved) {
-		struct reserved *next = reserved->next;
-
-		drm_mm_remove_node(&reserved->node);
-		kfree(reserved);
-
-		reserved = next;
-	}
-	if (drm_mm_node_allocated(&hole))
-		drm_mm_remove_node(&hole);
-	intel_runtime_pm_put(i915, wakeref);
-	mutex_unlock(&i915->drm.struct_mutex);
-
-	return err;
-}
-
 int i915_gem_evict_mock_selftests(void)
 {
 	static const struct i915_subtest tests[] = {
@@ -542,7 +395,6 @@ int i915_gem_evict_mock_selftests(void)
 int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
-		SUBTEST(igt_evict_contexts),
 	};
 
 	if (i915_terminally_wedged(&i915->gpu_error))
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index af66e3d4e23a..1264fe9b6c38 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -14,7 +14,7 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
 	cond_resched();
 
 	if (flags & I915_WAIT_LOCKED &&
-	    i915_gem_switch_to_kernel_context(i915)) {
+	    i915_gem_switch_to_kernel_context_sync(i915, flags, HZ / 5)) {
 		pr_err("Failed to switch back to kernel context; declaring wedged\n");
 		i915_gem_set_wedged(i915);
 	}
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 3752355a259e..d0a227d63cef 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -1670,7 +1670,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
 	err = i915_subtests(tests, i915);
 
 	mutex_lock(&i915->drm.struct_mutex);
-	igt_flush_test(i915, I915_WAIT_LOCKED);
+	igt_flush_test(i915, I915_WAIT_LOCKED | I915_WAIT_FOR_IDLE_PARK);
 	mutex_unlock(&i915->drm.struct_mutex);
 
 	i915_modparams.enable_hangcheck = saved_hangcheck;
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index e19509bd135b..3c144b376a4e 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -67,7 +67,7 @@ static int live_sanitycheck(void *arg)
 err_spin:
 	igt_spinner_fini(&spin);
 err_unlock:
-	igt_flush_test(i915, I915_WAIT_LOCKED);
+	igt_flush_test(i915, I915_WAIT_LOCKED | I915_WAIT_FOR_IDLE_PARK);
 	intel_runtime_pm_put(i915, wakeref);
 	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
@@ -161,7 +161,7 @@ static int live_preempt(void *arg)
 err_spin_hi:
 	igt_spinner_fini(&spin_hi);
 err_unlock:
-	igt_flush_test(i915, I915_WAIT_LOCKED);
+	igt_flush_test(i915, I915_WAIT_LOCKED | I915_WAIT_FOR_IDLE_PARK);
 	intel_runtime_pm_put(i915, wakeref);
 	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
@@ -255,7 +255,7 @@ static int live_late_preempt(void *arg)
 err_spin_hi:
 	igt_spinner_fini(&spin_hi);
 err_unlock:
-	igt_flush_test(i915, I915_WAIT_LOCKED);
+	igt_flush_test(i915, I915_WAIT_LOCKED | I915_WAIT_FOR_IDLE_PARK);
 	intel_runtime_pm_put(i915, wakeref);
 	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
@@ -379,7 +379,7 @@ static int live_preempt_hang(void *arg)
 err_spin_hi:
 	igt_spinner_fini(&spin_hi);
 err_unlock:
-	igt_flush_test(i915, I915_WAIT_LOCKED);
+	igt_flush_test(i915, I915_WAIT_LOCKED | I915_WAIT_FOR_IDLE_PARK);
 	intel_runtime_pm_put(i915, wakeref);
 	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index 914221897b6b..2671e216041f 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -452,7 +452,7 @@ live_engine_reset_gt_engine_workarounds(void *arg)
 	igt_global_reset_unlock(i915);
 	kernel_context_close(ctx);
 
-	igt_flush_test(i915, I915_WAIT_LOCKED);
+	igt_flush_test(i915, I915_WAIT_LOCKED | I915_WAIT_FOR_IDLE_PARK);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c
index e4db9a31b510..2009e776b5d8 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/selftests/mock_context.c
@@ -49,6 +49,7 @@ mock_context(struct drm_i915_private *i915,
 		struct intel_context *ce = &ctx->__engine[n];
 
 		ce->gem_context = ctx;
+		INIT_LIST_HEAD(&ce->active_link);
 		INIT_LIST_HEAD(&ce->signal_link);
 		INIT_LIST_HEAD(&ce->signals);
 	}
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index cd3ff0b13cef..85718105c3bd 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -214,7 +214,11 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
 	i915_timeline_init(i915, &engine->base.timeline, engine->base.name);
 	i915_timeline_set_subclass(&engine->base.timeline, TIMELINE_ENGINE);
 
+	engine->base.execlists.queue_priority = INT_MIN;
+	engine->base.execlists.queue = RB_ROOT_CACHED;
+
 	intel_engine_init_breadcrumbs(&engine->base);
+	intel_engine_init_batch_pool(&engine->base);
 
 	/* fake hw queue */
 	spin_lock_init(&engine->hw_lock);
@@ -256,15 +260,10 @@ void mock_engine_free(struct intel_engine_cs *engine)
 {
 	struct mock_engine *mock =
 		container_of(engine, typeof(*mock), base);
-	struct intel_context *ce;
 
 	GEM_BUG_ON(timer_pending(&mock->hw_delay));
 
-	ce = fetch_and_zero(&engine->last_retired_context);
-	if (ce)
-		intel_context_unpin(ce);
-
-	__intel_context_unpin(engine->i915->kernel_context, engine);
+	context_unpin(engine->i915->kernel_context, engine);
 
 	intel_engine_fini_breadcrumbs(engine);
 	i915_timeline_fini(&engine->timeline);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 40f3774b2666..85798dee3c2b 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -47,6 +47,15 @@ void mock_device_flush(struct drm_i915_private *i915)
 	GEM_BUG_ON(i915->gt.active_requests);
 }
 
+static void mock_device_park(struct drm_i915_private *i915)
+{
+	intel_engines_park(i915);
+	intel_contexts_park(i915);
+	i915_timelines_park(i915);
+
+	i915_vma_parked(i915);
+}
+
 static void mock_device_release(struct drm_device *dev)
 {
 	struct drm_i915_private *i915 = to_i915(dev);
@@ -55,7 +64,7 @@ static void mock_device_release(struct drm_device *dev)
 
 	mutex_lock(&i915->drm.struct_mutex);
 	mock_device_flush(i915);
-	i915_gem_contexts_lost(i915);
+	mock_device_park(i915);
 	mutex_unlock(&i915->drm.struct_mutex);
 
 	cancel_delayed_work_sync(&i915->gt.retire_work);
@@ -202,6 +211,7 @@ struct drm_i915_private *mock_gem_device(void)
 	INIT_DELAYED_WORK(&i915->gt.idle_work, mock_idle_work_handler);
 
 	i915->gt.awake = true;
+	i915->gt.epoch = 1;
 
 	i915->objects = KMEM_CACHE(mock_object, SLAB_HWCACHE_ALIGN);
 	if (!i915->objects)
@@ -230,6 +240,7 @@ struct drm_i915_private *mock_gem_device(void)
 
 	i915_timelines_init(i915);
 
+	INIT_LIST_HEAD(&i915->gt.active_contexts);
 	INIT_LIST_HEAD(&i915->gt.active_rings);
 	INIT_LIST_HEAD(&i915->gt.closed_vma);
 
-- 
2.20.1


