[PATCH 59/59] unordered-execution-no-engine-order

Chris Wilson <chris@chris-wilson.co.uk>
Thu Dec 27 17:33:07 UTC 2018


---
 drivers/gpu/drm/i915/i915_drv.h               |   1 +
 drivers/gpu/drm/i915/i915_gem.c               | 149 ++++++------------
 drivers/gpu/drm/i915/i915_gem_context.c       | 123 ++++++---------
 drivers/gpu/drm/i915/i915_gem_context.h       |  15 +-
 drivers/gpu/drm/i915/i915_gem_evict.c         |  25 +--
 drivers/gpu/drm/i915/i915_request.c           |  40 ++---
 drivers/gpu/drm/i915/i915_request.h           |   1 +
 drivers/gpu/drm/i915/intel_engine_cs.c        |  60 +------
 drivers/gpu/drm/i915/intel_ringbuffer.h       |  14 --
 .../gpu/drm/i915/selftests/i915_gem_context.c |  39 +----
 .../gpu/drm/i915/selftests/igt_flush_test.c   |   2 +-
 drivers/gpu/drm/i915/selftests/mock_engine.c  |   7 +-
 .../gpu/drm/i915/selftests/mock_gem_device.c  |   2 +-
 13 files changed, 157 insertions(+), 321 deletions(-)
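
As read from the hunks below, the old two-step idle sequence (emit a
switch-to-kernel-context request on each engine, then wait for idle
separately) is folded into a single synchronous helper,
i915_gem_switch_to_kernel_context_sync(). A minimal caller-side sketch
of the new pattern follows; example_quiesce_gt() is a hypothetical
wrapper for illustration only and is not part of the patch:

static int example_quiesce_gt(struct drm_i915_private *i915)
{
	lockdep_assert_held(&i915->drm.struct_mutex);

	/*
	 * Previously: i915_gem_switch_to_kernel_context(i915) followed by
	 * a separate i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED, timeout).
	 *
	 * Now: one call emits a kernel-context request on every engine,
	 * waits for each of them, and retires the completed requests.
	 * I915_WAIT_LOCKED is mandatory for this helper.
	 */
	return i915_gem_switch_to_kernel_context_sync(i915,
						      I915_WAIT_LOCKED |
						      I915_WAIT_INTERRUPTIBLE,
						      HZ / 10);
}

The other half of the change drops the per-request context unpin and
engine->last_retired_context: a context now takes a single pin per GT
active period via pin_active_context() and gt.active_contexts, and those
pins are released in one pass by intel_contexts_park() when the GT idles,
so requests can retire in any order without an engine-ordered unpin.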

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 92fd201892a9..b39ebe1dfbe8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1965,6 +1965,7 @@ struct drm_i915_private {
 		struct i915_vma *timeline_hwsp;
 		u64 timeline_free;
 
+		struct list_head active_contexts;
 		struct list_head active_rings;
 		struct list_head closed_vma;
 		u32 active_requests;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6405bd6f6924..b923052e36b2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -169,6 +169,7 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)
 	synchronize_irq(i915->drm.irq);
 
 	intel_engines_park(i915);
+	intel_contexts_park(i915);
 	i915_timelines_park(i915);
 
 	i915_pmu_gt_parked(i915);
@@ -3137,43 +3138,23 @@ static void __sleep_rcu(struct rcu_head *rcu)
 	}
 }
 
-static inline bool
-new_requests_since_last_retire(const struct drm_i915_private *i915)
-{
-	return (READ_ONCE(i915->gt.active_requests) ||
-		work_pending(&i915->gt.idle_work.work));
-}
-
-static void assert_kernel_context_is_current(struct drm_i915_private *i915)
-{
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-
-	if (i915_terminally_wedged(&i915->gpu_error))
-		return;
-
-	GEM_BUG_ON(i915->gt.active_requests);
-	for_each_engine(engine, i915, id) {
-		GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline.last_request));
-		GEM_BUG_ON(engine->last_retired_context !=
-			   to_intel_context(i915->kernel_context, engine));
-	}
-}
-
 static void
 i915_gem_idle_work_handler(struct work_struct *work)
 {
-	struct drm_i915_private *dev_priv =
-		container_of(work, typeof(*dev_priv), gt.idle_work.work);
+	struct drm_i915_private *i915 =
+		container_of(work, typeof(*i915), gt.idle_work.work);
 	unsigned int epoch = I915_EPOCH_INVALID;
 	bool rearm_hangcheck;
 
-	if (!READ_ONCE(dev_priv->gt.awake))
+	if (!READ_ONCE(i915->gt.awake))
 		return;
 
-	if (READ_ONCE(dev_priv->gt.active_requests))
+	if (READ_ONCE(i915->gt.active_requests))
 		return;
 
+	rearm_hangcheck =
+		cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
+
 	/*
 	 * Flush out the last user context, leaving only the pinned
 	 * kernel context resident. When we are idling on the kernel_context,
@@ -3182,56 +3163,40 @@ i915_gem_idle_work_handler(struct work_struct *work)
 	 * always called at least twice before idling (and if the system is
 	 * idle that implies a round trip through the retire worker).
 	 */
-	mutex_lock(&dev_priv->drm.struct_mutex);
-	i915_gem_switch_to_kernel_context(dev_priv);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
-
-	GEM_TRACE("active_requests=%d (after switch-to-kernel-context)\n",
-		  READ_ONCE(dev_priv->gt.active_requests));
-
-	/*
-	 * Wait for last execlists context complete, but bail out in case a
-	 * new request is submitted. As we don't trust the hardware, we
-	 * continue on if the wait times out. This is necessary to allow
-	 * the machine to suspend even if the hardware dies, and we will
-	 * try to recover in resume (after depriving the hardware of power,
-	 * it may be in a better mmod).
-	 */
-	__wait_for(if (new_requests_since_last_retire(dev_priv)) return,
-		   intel_engines_are_idle(dev_priv),
-		   I915_IDLE_ENGINES_TIMEOUT * 1000,
-		   10, 500);
-
-	rearm_hangcheck =
-		cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
-
-	if (!mutex_trylock(&dev_priv->drm.struct_mutex)) {
+	if (!mutex_trylock(&i915->drm.struct_mutex)) {
 		/* Currently busy, come back later */
-		mod_delayed_work(dev_priv->wq,
-				 &dev_priv->gt.idle_work,
+		mod_delayed_work(i915->wq,
+				 &i915->gt.idle_work,
 				 msecs_to_jiffies(50));
 		goto out_rearm;
 	}
 
-	/*
-	 * New request retired after this work handler started, extend active
-	 * period until next instance of the work.
-	 */
-	if (new_requests_since_last_retire(dev_priv))
-		goto out_unlock;
+	if (!i915->gt.active_requests &&
+	    !work_pending(&i915->gt.idle_work.work)) {
+		++i915->gt.active_requests; /* don't requeue idle! */
 
-	epoch = __i915_gem_park(dev_priv);
+		if (i915_gem_switch_to_kernel_context_sync(i915,
+							   I915_WAIT_LOCKED,
+							   HZ / 10)) {
+			dev_err(i915->drm.dev,
+				"Failed to idle engines, declaring wedged!\n");
+			GEM_TRACE_DUMP();
+			i915_gem_set_wedged(i915);
+			i915_retire_requests(i915);
+		}
 
-	assert_kernel_context_is_current(dev_priv);
+		if (!--i915->gt.active_requests) {
+			epoch = __i915_gem_park(i915);
+			rearm_hangcheck = false;
+		}
+	}
 
-	rearm_hangcheck = false;
-out_unlock:
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&i915->drm.struct_mutex);
 
 out_rearm:
 	if (rearm_hangcheck) {
-		GEM_BUG_ON(!dev_priv->gt.awake);
-		i915_queue_hangcheck(dev_priv);
+		GEM_BUG_ON(!i915->gt.awake);
+		i915_queue_hangcheck(i915);
 	}
 
 	/*
@@ -3242,11 +3207,11 @@ i915_gem_idle_work_handler(struct work_struct *work)
 	 * period, and then queue a task (that will run last on the wq) to
 	 * shrink and re-optimize the caches.
 	 */
-	if (same_epoch(dev_priv, epoch)) {
+	if (same_epoch(i915, epoch)) {
 		struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL);
 		if (s) {
 			init_rcu_head(&s->rcu);
-			s->i915 = dev_priv;
+			s->i915 = i915;
 			s->epoch = epoch;
 			call_rcu(&s->rcu, __sleep_rcu);
 		}
@@ -3451,6 +3416,9 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915,
 
 		i915_retire_requests(i915);
 		GEM_BUG_ON(i915->gt.active_requests);
+
+		if (flags & I915_WAIT_FOR_IDLE_PARK)
+			__i915_gem_park(i915);
 	}
 
 	return 0;
@@ -4658,10 +4626,6 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
 
 	intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
 	intel_runtime_pm_put(i915, wakeref);
-
-	mutex_lock(&i915->drm.struct_mutex);
-	i915_gem_contexts_lost(i915);
-	mutex_unlock(&i915->drm.struct_mutex);
 }
 
 int i915_gem_suspend(struct drm_i915_private *i915)
@@ -4688,15 +4652,11 @@ int i915_gem_suspend(struct drm_i915_private *i915)
 	 * not rely on its state.
 	 */
 	if (!i915_terminally_wedged(&i915->gpu_error)) {
-		ret = i915_gem_switch_to_kernel_context(i915);
-		if (ret)
-			goto err_unlock;
-
-		ret = i915_gem_wait_for_idle(i915,
-					     I915_WAIT_INTERRUPTIBLE |
-					     I915_WAIT_LOCKED |
-					     I915_WAIT_FOR_IDLE_BOOST,
-					     HZ / 10);
+		ret = i915_gem_switch_to_kernel_context_sync(i915,
+							     I915_WAIT_LOCKED |
+							     I915_WAIT_INTERRUPTIBLE |
+							     I915_WAIT_FOR_IDLE_BOOST,
+							     HZ / 10);
 		if (ret == -EINTR)
 			goto err_unlock;
 
@@ -4796,7 +4756,9 @@ void i915_gem_resume(struct drm_i915_private *i915)
 	intel_uc_resume(i915);
 
 	/* Always reload a context for powersaving. */
-	if (i915_gem_switch_to_kernel_context(i915))
+	if (i915_gem_switch_to_kernel_context_sync(i915,
+						   I915_WAIT_LOCKED,
+						   MAX_SCHEDULE_TIMEOUT))
 		goto err_wedged;
 
 out_unlock:
@@ -4992,18 +4954,14 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
 			goto err_active;
 	}
 
-	err = i915_gem_switch_to_kernel_context(i915);
-	if (err)
-		goto err_active;
-
-	if (i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED, HZ / 5)) {
+	if (i915_gem_switch_to_kernel_context_sync(i915,
+						   I915_WAIT_LOCKED,
+						   HZ / 5)) {
 		i915_gem_set_wedged(i915);
 		err = -EIO; /* Caller will declare us wedged */
 		goto err_active;
 	}
 
-	assert_kernel_context_is_current(i915);
-
 	/*
 	 * Immediately park the GPU so that we enable powersaving and
 	 * treat it as idle. The next time we issue a request, we will
@@ -5081,15 +5039,11 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
 	 * request, ensure we are pointing at the kernel context and
 	 * then remove it.
 	 */
-	if (WARN_ON(i915_gem_switch_to_kernel_context(i915)))
-		goto out_ctx;
-
-	if (WARN_ON(i915_gem_wait_for_idle(i915,
-					   I915_WAIT_LOCKED,
-					   MAX_SCHEDULE_TIMEOUT)))
-		goto out_ctx;
+	if (i915_gem_switch_to_kernel_context_sync(i915,
+						   I915_WAIT_LOCKED,
+						   HZ / 10))
+		i915_gem_set_wedged(i915);
 
-	i915_gem_contexts_lost(i915);
 	goto out_ctx;
 }
 
@@ -5431,6 +5385,7 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
 	mutex_init(&dev_priv->gt.timeline_lock);
 	INIT_LIST_HEAD(&dev_priv->gt.timelines);
 
+	INIT_LIST_HEAD(&dev_priv->gt.active_contexts);
 	INIT_LIST_HEAD(&dev_priv->gt.active_rings);
 	INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 93eb685035c2..22aec4fddff9 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -358,6 +358,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
 
 		ce->gem_context = ctx;
 		ce->owner = dev_priv->engine[n];
+		INIT_LIST_HEAD(&ce->active_link);
 	}
 
 	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
@@ -624,17 +625,6 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
 	return 0;
 }
 
-void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
-{
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-
-	lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
-	for_each_engine(engine, dev_priv, id)
-		intel_engine_lost_context(engine);
-}
-
 void i915_gem_contexts_fini(struct drm_i915_private *i915)
 {
 	lockdep_assert_held(&i915->drm.struct_mutex);
@@ -724,84 +714,37 @@ last_request_on_engine(struct i915_timeline *timeline,
 	return NULL;
 }
 
-static bool engine_has_kernel_context_barrier(struct intel_engine_cs *engine)
-{
-	struct drm_i915_private *i915 = engine->i915;
-	const struct intel_context * const ce =
-		to_intel_context(i915->kernel_context, engine);
-	struct i915_timeline *barrier = ce->ring->timeline;
-	struct intel_ring *ring;
-	bool any_active = false;
-
-	lockdep_assert_held(&i915->drm.struct_mutex);
-	list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
-		struct i915_request *rq;
-
-		rq = last_request_on_engine(ring->timeline, engine);
-		if (!rq)
-			continue;
-
-		any_active = true;
-
-		if (rq->hw_context == ce)
-			continue;
-
-		/*
-		 * Was this request submitted after the previous
-		 * switch-to-kernel-context?
-		 */
-		if (!i915_timeline_sync_is_later(barrier, &rq->fence)) {
-			GEM_TRACE("%s needs barrier for %llx:%lld\n",
-				  ring->timeline->name,
-				  rq->fence.context,
-				  rq->fence.seqno);
-			return false;
-		}
-
-		GEM_TRACE("%s has barrier after %llx:%lld\n",
-			  ring->timeline->name,
-			  rq->fence.context,
-			  rq->fence.seqno);
-	}
-
-	/*
-	 * If any other timeline was still active and behind the last barrier,
-	 * then our last switch-to-kernel-context must still be queued and
-	 * will run last (leaving the engine in the kernel context when it
-	 * eventually idles).
-	 */
-	if (any_active)
-		return true;
-
-	/* The engine is idle; check that it is idling in the kernel context. */
-	return engine->last_retired_context == ce;
-}
-
-int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
+int i915_gem_switch_to_kernel_context_sync(struct drm_i915_private *i915,
+					   unsigned int flags,
+					   unsigned long timeout)
 {
+	struct i915_request *wait[I915_NUM_ENGINES] = {};
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
-
-	GEM_TRACE("awake?=%s\n", yesno(i915->gt.awake));
+	int err = 0;
 
 	lockdep_assert_held(&i915->drm.struct_mutex);
 	GEM_BUG_ON(!i915->kernel_context);
+	GEM_BUG_ON(!(flags & I915_WAIT_LOCKED));
 
-	i915_retire_requests(i915);
+	/* Inoperable, so presume the GPU is pointing into the void! */
+	if (i915_terminally_wedged(&i915->gpu_error))
+		return 0;
 
 	for_each_engine(engine, i915, id) {
 		struct intel_ring *ring;
 		struct i915_request *rq;
 
-		GEM_BUG_ON(!to_intel_context(i915->kernel_context, engine));
-		if (engine_has_kernel_context_barrier(engine))
+		if (!engine->context_size)
 			continue;
 
-		GEM_TRACE("emit barrier on %s\n", engine->name);
+		GEM_BUG_ON(!to_intel_context(i915->kernel_context, engine));
 
 		rq = i915_request_alloc(engine, i915->kernel_context);
-		if (IS_ERR(rq))
-			return PTR_ERR(rq);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			break;
+		}
 
 		/* Queue this switch after all other activity */
 		list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
@@ -824,10 +767,28 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
 			i915_timeline_sync_set(rq->timeline, &prev->fence);
 		}
 
+		if (flags & I915_WAIT_FOR_IDLE_BOOST)
+			gen6_rps_boost(rq, NULL);
+
+		wait[id] = i915_request_get(rq);
 		i915_request_add(rq);
 	}
 
-	return 0;
+	for_each_engine(engine, i915, id) {
+		if (!wait[id])
+			continue;
+
+		if (err == 0) {
+			timeout = i915_request_wait(wait[id], flags, timeout);
+			if (timeout < 0)
+				err = timeout;
+		}
+
+		i915_request_put(wait[id]);
+	}
+
+	i915_retire_requests(i915);
+	return err;
 }
 
 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
@@ -1436,6 +1397,20 @@ int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
 	return err;
 }
 
+void intel_contexts_park(struct drm_i915_private *i915)
+{
+	struct intel_context *ce, *cn;
+
+	lockdep_assert_held(&i915->drm.struct_mutex);
+
+	list_for_each_entry_safe(ce, cn,
+				 &i915->gt.active_contexts, active_link) {
+		INIT_LIST_HEAD(&ce->active_link);
+		intel_context_unpin(ce);
+	}
+	INIT_LIST_HEAD(&i915->gt.active_contexts);
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/mock_context.c"
 #include "selftests/i915_gem_context.c"
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 3e3aee925f8a..5c64e47b9f6a 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -172,6 +172,7 @@ struct i915_gem_context {
 		struct i915_gem_context *gem_context;
 		struct intel_engine_cs *active;
 		struct intel_engine_cs *owner;
+		struct list_head active_link;
 		struct i915_vma *state;
 		struct intel_ring *ring;
 		u32 *lrc_reg_state;
@@ -320,6 +321,12 @@ static inline void __intel_context_pin(struct intel_context *ce)
 	ce->pin_count++;
 }
 
+static inline void __intel_context_unpin(struct intel_context *ce)
+{
+	GEM_BUG_ON(!ce->pin_count);
+	ce->pin_count--;
+}
+
 static inline void intel_context_unpin(struct intel_context *ce)
 {
 	GEM_BUG_ON(!ce->pin_count);
@@ -330,17 +337,19 @@ static inline void intel_context_unpin(struct intel_context *ce)
 	ce->ops->unpin(ce);
 }
 
+void intel_contexts_park(struct drm_i915_private *i915);
+
 /* i915_gem_context.c */
 int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
-void i915_gem_contexts_lost(struct drm_i915_private *dev_priv);
 void i915_gem_contexts_fini(struct drm_i915_private *dev_priv);
 
 int i915_gem_context_open(struct drm_i915_private *i915,
 			  struct drm_file *file);
 void i915_gem_context_close(struct drm_file *file);
 
-int i915_switch_context(struct i915_request *rq);
-int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
+int i915_gem_switch_to_kernel_context_sync(struct drm_i915_private *i915,
+					   unsigned int flags,
+					   unsigned long timeout);
 
 void i915_gem_context_release(struct kref *ctx_ref);
 struct i915_gem_context *
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 8ccde5761c2c..6048298c682e 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -39,18 +39,7 @@ I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
 
 static bool ggtt_is_idle(struct drm_i915_private *i915)
 {
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
-
-       if (i915->gt.active_requests)
-	       return false;
-
-       for_each_engine(engine, i915, id) {
-	       if (!intel_engine_has_kernel_context(engine))
-		       return false;
-       }
-
-       return true;
+	return !i915->gt.active_requests;
 }
 
 static int ggtt_flush(struct drm_i915_private *i915)
@@ -63,14 +52,10 @@ static int ggtt_flush(struct drm_i915_private *i915)
 	 * the hopes that we can then remove contexts and the like only
 	 * bound by their active reference.
 	 */
-	err = i915_gem_switch_to_kernel_context(i915);
-	if (err)
-		return err;
-
-	err = i915_gem_wait_for_idle(i915,
-				     I915_WAIT_INTERRUPTIBLE |
-				     I915_WAIT_LOCKED,
-				     MAX_SCHEDULE_TIMEOUT);
+	err = i915_gem_switch_to_kernel_context_sync(i915,
+						     I915_WAIT_LOCKED |
+						     I915_WAIT_INTERRUPTIBLE,
+						     MAX_SCHEDULE_TIMEOUT);
 	if (err)
 		return err;
 
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index adf60a50434b..dc00e96615e7 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -210,18 +210,6 @@ static void __retire_engine_request(struct intel_engine_cs *engine,
 	spin_unlock(&rq->lock);
 
 	local_irq_enable();
-
-	/*
-	 * The backing object for the context is done after switching to the
-	 * *next* context. Therefore we cannot retire the previous context until
-	 * the next context has already started running. However, since we
-	 * cannot take the required locks at i915_request_submit() we
-	 * defer the unpinning of the active context to now, retirement of
-	 * the subsequent request.
-	 */
-	if (engine->last_retired_context)
-		intel_context_unpin(engine->last_retired_context);
-	engine->last_retired_context = rq->hw_context;
 }
 
 static void __retire_engine_upto(struct intel_engine_cs *engine,
@@ -294,7 +282,6 @@ static void i915_request_retire(struct i915_request *request)
 
 	/* Retirement decays the ban score as it is a sign of ctx progress */
 	atomic_dec_if_positive(&request->gem_context->ban_score);
-	intel_context_unpin(request->hw_context);
 
 	__retire_engine_upto(request->engine, request);
 
@@ -522,8 +509,15 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	 * ourselves.
 	 */
 	ce = intel_context_pin(ctx, engine);
-	if (IS_ERR(ce))
-		return ERR_CAST(ce);
+	if (IS_ERR(ce)) {
+		i915_gem_wait_for_idle(i915,
+				       I915_WAIT_LOCKED |
+				       I915_WAIT_FOR_IDLE_PARK,
+				       MAX_SCHEDULE_TIMEOUT);
+		ce = intel_context_pin(ctx, engine);
+		if (IS_ERR(ce))
+			return ERR_CAST(ce);
+	}
 
 	reserve_gt(i915);
 
@@ -639,9 +633,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	if (ret)
 		goto err_unwind;
 
-	/* Keep a second pin for the dual retirement along engine and ring */
-	__intel_context_pin(ce);
-
 	rq->infix = rq->ring->emit; /* end of header; start of user payload */
 
 	/* Check that we didn't interrupt ourselves with a new request */
@@ -836,6 +827,17 @@ void i915_request_skip(struct i915_request *rq, int error)
 	memset(vaddr + head, 0, rq->postfix - head);
 }
 
+static void pin_active_context(struct drm_i915_private *i915,
+			       struct intel_context *ce)
+{
+	if (unlikely(list_empty(&ce->active_link))) {
+		list_add(&ce->active_link, &i915->gt.active_contexts);
+		__intel_context_pin(ce);
+	}
+	__intel_context_unpin(ce);
+	GEM_BUG_ON(!ce->pin_count);
+}
+
 /*
  * NB: This function is not allowed to fail. Doing so would mean the the
  * request is not being tracked for completion but the work itself is
@@ -917,6 +919,8 @@ void i915_request_add(struct i915_request *request)
 		GEM_TRACE("marking %s as active\n", ring->timeline->name);
 		list_add(&ring->active_link, &request->i915->gt.active_rings);
 	}
+	pin_active_context(request->i915, request->hw_context);
+
 	request->emitted_jiffies = jiffies;
 
 	/*
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 7956f14c8e14..d06988099c6b 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -288,6 +288,7 @@ long i915_request_wait(struct i915_request *rq,
 #define I915_WAIT_PRIORITY	BIT(2) /* small priority bump for the request */
 #define I915_WAIT_ALL		BIT(3) /* used by i915_gem_object_wait() */
 #define I915_WAIT_FOR_IDLE_BOOST BIT(4)
+#define I915_WAIT_FOR_IDLE_PARK BIT(5)
 
 /**
  * Returns true if seq1 is later than seq2.
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index df81d53a9e28..f12c343f9df7 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -587,8 +587,8 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
 	return 0;
 }
 
-static void __intel_context_unpin(struct i915_gem_context *ctx,
-				  struct intel_engine_cs *engine)
+static void context_unpin(struct i915_gem_context *ctx,
+			  struct intel_engine_cs *engine)
 {
 	intel_context_unpin(to_intel_context(ctx, engine));
 }
@@ -652,10 +652,10 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
 	intel_engine_fini_breadcrumbs(engine);
 err_unpin_preempt:
 	if (i915->preempt_context)
-		__intel_context_unpin(i915->preempt_context, engine);
+		context_unpin(i915->preempt_context, engine);
 
 err_unpin_kernel:
-	__intel_context_unpin(i915->kernel_context, engine);
+	context_unpin(i915->kernel_context, engine);
 	return ret;
 }
 
@@ -680,8 +680,8 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 		i915_gem_object_put(engine->default_state);
 
 	if (i915->preempt_context)
-		__intel_context_unpin(i915->preempt_context, engine);
-	__intel_context_unpin(i915->kernel_context, engine);
+		context_unpin(i915->preempt_context, engine);
+	context_unpin(i915->kernel_context, engine);
 
 	i915_timeline_fini(&engine->timeline);
 
@@ -998,34 +998,6 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
 	return true;
 }
 
-/**
- * intel_engine_has_kernel_context:
- * @engine: the engine
- *
- * Returns true if the last context to be executed on this engine, or has been
- * executed if the engine is already idle, is the kernel context
- * (#i915.kernel_context).
- */
-bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
-{
-	const struct intel_context *kernel_context =
-		to_intel_context(engine->i915->kernel_context, engine);
-	struct i915_request *rq;
-
-	lockdep_assert_held(&engine->i915->drm.struct_mutex);
-
-	/*
-	 * Check the last context seen by the engine. If active, it will be
-	 * the last request that remains in the timeline. When idle, it is
-	 * the last executed context as tracked by retirement.
-	 */
-	rq = __i915_gem_active_peek(&engine->timeline.last_request);
-	if (rq)
-		return rq->hw_context == kernel_context;
-	else
-		return engine->last_retired_context == kernel_context;
-}
-
 void intel_engines_reset_default_submission(struct drm_i915_private *i915)
 {
 	struct intel_engine_cs *engine;
@@ -1144,26 +1116,6 @@ void intel_engines_unpark(struct drm_i915_private *i915)
 	}
 }
 
-/**
- * intel_engine_lost_context: called when the GPU is reset into unknown state
- * @engine: the engine
- *
- * We have either reset the GPU or otherwise about to lose state tracking of
- * the current GPU logical state (e.g. suspend). On next use, it is therefore
- * imperative that we make no presumptions about the current state and load
- * from scratch.
- */
-void intel_engine_lost_context(struct intel_engine_cs *engine)
-{
-	struct intel_context *ce;
-
-	lockdep_assert_held(&engine->i915->drm.struct_mutex);
-
-	ce = fetch_and_zero(&engine->last_retired_context);
-	if (ce)
-		intel_context_unpin(ce);
-}
-
 bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
 {
 	switch (INTEL_GEN(engine->i915)) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index cd18f6fff265..ac02a52beb41 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -456,17 +456,6 @@ struct intel_engine_cs {
 
 	struct intel_engine_execlists execlists;
 
-	/* Contexts are pinned whilst they are active on the GPU. The last
-	 * context executed remains active whilst the GPU is idle - the
-	 * switch away and write to the context object only occurs on the
-	 * next execution.  Contexts are only unpinned on retirement of the
-	 * following request ensuring that we can always write to the object
-	 * on the context switch even after idling. Across suspend, we switch
-	 * to the kernel context and trash it as the save may not happen
-	 * before the hardware is powered down.
-	 */
-	struct intel_context *last_retired_context;
-
 	/* status_notifier: list of callbacks for context-switch changes */
 	struct atomic_notifier_head context_status_notifier;
 
@@ -997,9 +986,6 @@ void intel_engines_sanitize(struct drm_i915_private *i915, bool force);
 bool intel_engine_is_idle(struct intel_engine_cs *engine);
 bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
 
-bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);
-void intel_engine_lost_context(struct intel_engine_cs *engine);
-
 void intel_engines_park(struct drm_i915_private *i915);
 void intel_engines_unpark(struct drm_i915_private *i915);
 
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 3294a271c9ed..b3cc66baf5f7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -1258,34 +1258,15 @@ static int __igt_switch_to_kernel_context(struct drm_i915_private *i915,
 		i915_request_add(rq);
 	}
 
-	err = i915_gem_switch_to_kernel_context(i915);
+	err = i915_gem_switch_to_kernel_context_sync(i915,
+						     I915_WAIT_LOCKED,
+						     MAX_SCHEDULE_TIMEOUT);
 	if (err)
 		return err;
 
-	for_each_engine_masked(engine, i915, engines, tmp) {
-		if (!engine_has_kernel_context_barrier(engine)) {
-			pr_err("kernel context not last on engine %s!\n",
-			       engine->name);
-			return -EINVAL;
-		}
-	}
-
-	err = i915_gem_wait_for_idle(i915,
-				     I915_WAIT_LOCKED,
-				     MAX_SCHEDULE_TIMEOUT);
-	if (err)
-		return err;
-
-	GEM_BUG_ON(i915->gt.active_requests);
-	for_each_engine_masked(engine, i915, engines, tmp) {
-		if (engine->last_retired_context->gem_context != i915->kernel_context) {
-			pr_err("engine %s not idling in kernel context!\n",
-			       engine->name);
-			return -EINVAL;
-		}
-	}
-
-	err = i915_gem_switch_to_kernel_context(i915);
+	err = i915_gem_switch_to_kernel_context_sync(i915,
+						     I915_WAIT_LOCKED,
+						     MAX_SCHEDULE_TIMEOUT);
 	if (err)
 		return err;
 
@@ -1295,14 +1276,6 @@ static int __igt_switch_to_kernel_context(struct drm_i915_private *i915,
 		return -EINVAL;
 	}
 
-	for_each_engine_masked(engine, i915, engines, tmp) {
-		if (!intel_engine_has_kernel_context(engine)) {
-			pr_err("kernel context not last on engine %s!\n",
-			       engine->name);
-			return -EINVAL;
-		}
-	}
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index af66e3d4e23a..1264fe9b6c38 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -14,7 +14,7 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
 	cond_resched();
 
 	if (flags & I915_WAIT_LOCKED &&
-	    i915_gem_switch_to_kernel_context(i915)) {
+	    i915_gem_switch_to_kernel_context_sync(i915, flags, HZ / 5)) {
 		pr_err("Failed to switch back to kernel context; declaring wedged\n");
 		i915_gem_set_wedged(i915);
 	}
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index 50e1a0b1af7e..d293cf9b3308 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -251,15 +251,10 @@ void mock_engine_free(struct intel_engine_cs *engine)
 {
 	struct mock_engine *mock =
 		container_of(engine, typeof(*mock), base);
-	struct intel_context *ce;
 
 	GEM_BUG_ON(timer_pending(&mock->hw_delay));
 
-	ce = fetch_and_zero(&engine->last_retired_context);
-	if (ce)
-		intel_context_unpin(ce);
-
-	__intel_context_unpin(engine->i915->kernel_context, engine);
+	context_unpin(engine->i915->kernel_context, engine);
 
 	mock_ring_free(engine->buffer);
 
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index b2292eb609f8..7292ff40fdf8 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -55,7 +55,6 @@ static void mock_device_release(struct drm_device *dev)
 
 	mutex_lock(&i915->drm.struct_mutex);
 	mock_device_flush(i915);
-	i915_gem_contexts_lost(i915);
 	mutex_unlock(&i915->drm.struct_mutex);
 
 	cancel_delayed_work_sync(&i915->gt.retire_work);
@@ -232,6 +231,7 @@ struct drm_i915_private *mock_gem_device(void)
 	mutex_init(&i915->gt.timeline_lock);
 	INIT_LIST_HEAD(&i915->gt.timelines);
 
+	INIT_LIST_HEAD(&i915->gt.active_contexts);
 	INIT_LIST_HEAD(&i915->gt.active_rings);
 	INIT_LIST_HEAD(&i915->gt.closed_vma);
 
-- 
2.20.1


