[Intel-gfx] [PATCH v4] drm/i915: Beware temporary wedging when determining -EIO

Chris Wilson <chris@chris-wilson.co.uk>
Tue Feb 19 14:38:45 UTC 2019


At a few points in our uABI, we check to see if the driver is wedged and
report -EIO back to the user in that case. However, as we perform the
check and reset asynchronously, we may instead see the temporary wedging
used to cancel inflight rendering to avoid a deadlock during reset. If
we suspect this is the case, that is, we see a wedged driver with a
reset still in progress, then wait until the reset is resolved before
reporting the wedged status.

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=109580
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
Ah! intel_reset_finish() can still hit struct_mutex, so we can't simply
wait for the reset to complete while holding struct_mutex ourselves.
#$#@#$@
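
For reference, the intent is that the uABI entry points consume the new
helper roughly as follows (a caller-side sketch distilled from the
i915_request_alloc()/i915_gem_ring_throttle() hunks below; the errno now
depends on the reset state instead of always being -EIO):

	/* Sketch only: let the helper classify the wedged state. */
	ret = i915_terminally_wedged(i915);
	if (ret)
		/* -EIO if truly dead; -EAGAIN or -EINTR while a reset
		 * is still in progress and may yet recover */
		return ret;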
---
 drivers/gpu/drm/i915/i915_debugfs.c           |  6 ++---
 drivers/gpu/drm/i915/i915_drv.h               |  7 ++++-
 drivers/gpu/drm/i915/i915_gem.c               | 19 ++++++-------
 drivers/gpu/drm/i915/i915_request.c           |  5 ++--
 drivers/gpu/drm/i915/i915_reset.c             | 27 +++++++++++++++++--
 drivers/gpu/drm/i915/i915_reset.h             |  2 ++
 drivers/gpu/drm/i915/intel_engine_cs.c        |  8 +++---
 drivers/gpu/drm/i915/intel_hangcheck.c        |  2 +-
 drivers/gpu/drm/i915/selftests/huge_pages.c   |  2 +-
 drivers/gpu/drm/i915/selftests/i915_active.c  |  2 +-
 .../drm/i915/selftests/i915_gem_coherency.c   |  4 +--
 .../gpu/drm/i915/selftests/i915_gem_context.c |  2 +-
 .../gpu/drm/i915/selftests/i915_gem_evict.c   |  2 +-
 .../gpu/drm/i915/selftests/i915_gem_object.c  |  2 +-
 drivers/gpu/drm/i915/selftests/i915_request.c |  2 +-
 .../gpu/drm/i915/selftests/igt_flush_test.c   |  2 +-
 .../gpu/drm/i915/selftests/intel_hangcheck.c  | 24 ++++++++---------
 drivers/gpu/drm/i915/selftests/intel_lrc.c    |  2 +-
 .../drm/i915/selftests/intel_workarounds.c    |  4 +--
 19 files changed, 76 insertions(+), 48 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 2aeea977283f..ffcc98842f25 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -3833,9 +3833,7 @@ static const struct file_operations i915_cur_wm_latency_fops = {
 static int
 i915_wedged_get(void *data, u64 *val)
 {
-	struct drm_i915_private *dev_priv = data;
-
-	*val = i915_terminally_wedged(&dev_priv->gpu_error);
+	*val = i915_is_wedged(data);
 
 	return 0;
 }
@@ -3918,7 +3916,7 @@ i915_drop_caches_set(void *data, u64 val)
 		mutex_unlock(&i915->drm.struct_mutex);
 	}
 
-	if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(&i915->gpu_error))
+	if (val & DROP_RESET_ACTIVE && i915_is_wedged(i915))
 		i915_handle_error(i915, ALL_ENGINES, 0, NULL);
 
 	fs_reclaim_acquire(GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5c8d0489a1cd..3354b2726ca9 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3020,11 +3020,16 @@ int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
 struct i915_request *
 i915_gem_find_active_request(struct intel_engine_cs *engine);
 
-static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
+static inline bool __i915_wedged(struct i915_gpu_error *error)
 {
 	return unlikely(test_bit(I915_WEDGED, &error->flags));
 }
 
+static inline bool i915_is_wedged(struct drm_i915_private *i915)
+{
+	return __i915_wedged(&i915->gpu_error);
+}
+
 static inline u32 i915_reset_count(struct i915_gpu_error *error)
 {
 	return READ_ONCE(error->reset_count);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b421bc7a2e26..0810718cbeac 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1928,7 +1928,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 		 * fail). But any other -EIO isn't ours (e.g. swap in failure)
 		 * and so needs to be reported.
 		 */
-		if (!i915_terminally_wedged(&dev_priv->gpu_error))
+		if (!i915_is_wedged(dev_priv))
 			return VM_FAULT_SIGBUS;
 		/* else: fall through */
 	case -EAGAIN:
@@ -2958,7 +2958,7 @@ static void assert_kernel_context_is_current(struct drm_i915_private *i915)
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 
-	if (i915_terminally_wedged(&i915->gpu_error))
+	if (i915_is_wedged(i915))
 		return;
 
 	GEM_BUG_ON(i915->gt.active_requests);
@@ -3806,8 +3806,9 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	long ret;
 
 	/* ABI: return -EIO if already wedged */
-	if (i915_terminally_wedged(&dev_priv->gpu_error))
-		return -EIO;
+	ret = i915_terminally_wedged(dev_priv);
+	if (ret)
+		return ret;
 
 	spin_lock(&file_priv->mm.lock);
 	list_for_each_entry(request, &file_priv->mm.request_list, client_link) {
@@ -4460,7 +4461,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
 	 * back to defaults, recovering from whatever wedged state we left it
 	 * in and so worth trying to use the device once more.
 	 */
-	if (i915_terminally_wedged(&i915->gpu_error))
+	if (i915_is_wedged(i915))
 		i915_gem_unset_wedged(i915);
 
 	/*
@@ -4504,7 +4505,7 @@ int i915_gem_suspend(struct drm_i915_private *i915)
 	 * state. Fortunately, the kernel_context is disposable and we do
 	 * not rely on its state.
 	 */
-	if (!i915_terminally_wedged(&i915->gpu_error)) {
+	if (!i915_is_wedged(i915)) {
 		ret = i915_gem_switch_to_kernel_context(i915);
 		if (ret)
 			goto err_unlock;
@@ -4625,7 +4626,7 @@ void i915_gem_resume(struct drm_i915_private *i915)
 	return;
 
 err_wedged:
-	if (!i915_terminally_wedged(&i915->gpu_error)) {
+	if (!i915_is_wedged(i915)) {
 		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
 		i915_gem_set_wedged(i915);
 	}
@@ -4731,7 +4732,7 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
 	init_unused_rings(dev_priv);
 
 	BUG_ON(!dev_priv->kernel_context);
-	if (i915_terminally_wedged(&dev_priv->gpu_error)) {
+	if (i915_is_wedged(dev_priv)) {
 		ret = -EIO;
 		goto out;
 	}
@@ -5107,7 +5108,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 		 * wedged. But we only want to do this where the GPU is angry,
 		 * for all other failure, such as an allocation failure, bail.
 		 */
-		if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
+		if (!i915_is_wedged(dev_priv)) {
 			i915_load_error(dev_priv,
 					"Failed to initialize GPU, declaring it wedged!\n");
 			i915_gem_set_wedged(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 5ab4e1c01618..9bd252742913 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -561,8 +561,9 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	 * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
 	 * EIO if the GPU is already wedged.
 	 */
-	if (i915_terminally_wedged(&i915->gpu_error))
-		return ERR_PTR(-EIO);
+	ret = i915_terminally_wedged(i915);
+	if (ret)
+		return ERR_PTR(ret);
 
 	/*
 	 * Pinning the contexts may generate requests in order to acquire
diff --git a/drivers/gpu/drm/i915/i915_reset.c b/drivers/gpu/drm/i915/i915_reset.c
index 4df4c674223d..c474c9297333 100644
--- a/drivers/gpu/drm/i915/i915_reset.c
+++ b/drivers/gpu/drm/i915/i915_reset.c
@@ -1027,7 +1027,7 @@ void i915_reset(struct drm_i915_private *i915,
 
 finish:
 	reset_finish(i915);
-	if (!i915_terminally_wedged(error))
+	if (!__i915_wedged(error))
 		reset_restart(i915);
 	return;
 
@@ -1248,7 +1248,7 @@ void i915_handle_error(struct drm_i915_private *i915,
 	 * Try engine reset when available. We fall back to full reset if
 	 * single reset fails.
 	 */
-	if (intel_has_reset_engine(i915) && !i915_terminally_wedged(error)) {
+	if (intel_has_reset_engine(i915) && !__i915_wedged(error)) {
 		for_each_engine_masked(engine, i915, engine_mask, tmp) {
 			BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
 			if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
@@ -1334,6 +1334,29 @@ __releases(&i915->gpu_error.reset_backoff_srcu)
 	srcu_read_unlock(&error->reset_backoff_srcu, tag);
 }
 
+int i915_terminally_wedged(struct drm_i915_private *i915)
+{
+	struct i915_gpu_error *error = &i915->gpu_error;
+
+	if (!__i915_wedged(error))
+		return 0;
+
+	/* Reset still in progress? Maybe we will recover? */
+	if (!test_bit(I915_RESET_BACKOFF, &error->flags))
+		return -EIO;
+
+	/* XXX intel_reset_finish() still takes struct_mutex!!! */
+	if (mutex_is_locked(&i915->drm.struct_mutex))
+		return -EAGAIN;
+
+	if (wait_event_interruptible(error->reset_queue,
+				     !test_bit(I915_RESET_BACKOFF,
+					       &error->flags)))
+		return -EINTR;
+
+	return __i915_wedged(error) ? -EIO : 0;
+}
+
 bool i915_reset_flush(struct drm_i915_private *i915)
 {
 	int err;
diff --git a/drivers/gpu/drm/i915/i915_reset.h b/drivers/gpu/drm/i915/i915_reset.h
index 893c5d1c2eb8..16f2389f656f 100644
--- a/drivers/gpu/drm/i915/i915_reset.h
+++ b/drivers/gpu/drm/i915/i915_reset.h
@@ -36,6 +36,8 @@ bool i915_reset_flush(struct drm_i915_private *i915);
 int __must_check i915_reset_trylock(struct drm_i915_private *i915);
 void i915_reset_unlock(struct drm_i915_private *i915, int tag);
 
+int i915_terminally_wedged(struct drm_i915_private *i915);
+
 bool intel_has_gpu_reset(struct drm_i915_private *i915);
 bool intel_has_reset_engine(struct drm_i915_private *i915);
 
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 2547e2e51db8..b0f1a9256fd2 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1007,10 +1007,8 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
  */
 bool intel_engine_is_idle(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
-
 	/* More white lies, if wedged, hw state is inconsistent */
-	if (i915_terminally_wedged(&dev_priv->gpu_error))
+	if (i915_is_wedged(engine->i915))
 		return true;
 
 	/* Any inflight/incomplete requests? */
@@ -1054,7 +1052,7 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
 	 * If the driver is wedged, HW state may be very inconsistent and
 	 * report that it is still busy, even though we have stopped using it.
 	 */
-	if (i915_terminally_wedged(&dev_priv->gpu_error))
+	if (i915_is_wedged(dev_priv))
 		return true;
 
 	for_each_engine(engine, dev_priv, id) {
@@ -1496,7 +1494,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 		va_end(ap);
 	}
 
-	if (i915_terminally_wedged(&engine->i915->gpu_error))
+	if (i915_is_wedged(engine->i915))
 		drm_printf(m, "*** WEDGED ***\n");
 
 	drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms]\n",
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index a219c796e56d..9bfe9cb816cb 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -263,7 +263,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 	if (!READ_ONCE(dev_priv->gt.awake))
 		return;
 
-	if (i915_terminally_wedged(&dev_priv->gpu_error))
+	if (i915_is_wedged(dev_priv))
 		return;
 
 	/* As enabling the GPU requires fairly extensive mmio access,
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index a9a2fa35876f..2cee1b4b3e48 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -1764,7 +1764,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
 		return 0;
 	}
 
-	if (i915_terminally_wedged(&dev_priv->gpu_error))
+	if (i915_is_wedged(dev_priv))
 		return 0;
 
 	file = mock_file(dev_priv);
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index 337b1f98b923..e67ed445d23e 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -150,7 +150,7 @@ int i915_active_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(live_active_retire),
 	};
 
-	if (i915_terminally_wedged(&i915->gpu_error))
+	if (i915_is_wedged(i915))
 		return 0;
 
 	return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
index fd89a5a33c1a..81c761acc43e 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
@@ -248,12 +248,12 @@ static bool always_valid(struct drm_i915_private *i915)
 
 static bool needs_fence_registers(struct drm_i915_private *i915)
 {
-	return !i915_terminally_wedged(&i915->gpu_error);
+	return !i915_is_wedged(i915);
 }
 
 static bool needs_mi_store_dword(struct drm_i915_private *i915)
 {
-	if (i915_terminally_wedged(&i915->gpu_error))
+	if (i915_is_wedged(i915))
 		return false;
 
 	return intel_engine_can_store_dword(i915->engine[RCS]);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index b7b97c57ad05..97ccd0f274b7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -1632,7 +1632,7 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
 		SUBTEST(igt_vm_isolation),
 	};
 
-	if (i915_terminally_wedged(&dev_priv->gpu_error))
+	if (i915_is_wedged(dev_priv))
 		return 0;
 
 	return i915_subtests(tests, dev_priv);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 32dce7176f63..413d0c16c380 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -547,7 +547,7 @@ int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(igt_evict_contexts),
 	};
 
-	if (i915_terminally_wedged(&i915->gpu_error))
+	if (i915_is_wedged(i915))
 		return 0;
 
 	return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
index 395ae878e0f7..0d8e759fdbf8 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -583,7 +583,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
 	for (loop = 0; loop < 3; loop++) {
 		intel_wakeref_t wakeref;
 
-		if (i915_terminally_wedged(&i915->gpu_error))
+		if (i915_is_wedged(i915))
 			break;
 
 		obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 6733dc5b6b4c..e231cabf7f1a 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -1246,7 +1246,7 @@ int i915_request_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(live_breadcrumbs_smoketest),
 	};
 
-	if (i915_terminally_wedged(&i915->gpu_error))
+	if (i915_is_wedged(i915))
 		return 0;
 
 	return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index af66e3d4e23a..3bfc36c8bc67 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -29,5 +29,5 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
 		i915_gem_set_wedged(i915);
 	}
 
-	return i915_terminally_wedged(&i915->gpu_error) ? -EIO : 0;
+	return i915_is_wedged(i915) ? -EIO : 0;
 }
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index c32bc31192ae..e512d4406b92 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -342,7 +342,7 @@ static int igt_hang_sanitycheck(void *arg)
 			timeout = i915_request_wait(rq,
 						    I915_WAIT_LOCKED,
 						    MAX_SCHEDULE_TIMEOUT);
-		if (i915_terminally_wedged(&i915->gpu_error))
+		if (i915_is_wedged(i915))
 			timeout = -EIO;
 
 		i915_request_put(rq);
@@ -383,7 +383,7 @@ static int igt_global_reset(void *arg)
 
 	igt_global_reset_unlock(i915);
 
-	if (i915_terminally_wedged(&i915->gpu_error))
+	if (i915_is_wedged(i915))
 		err = -EIO;
 
 	return err;
@@ -401,13 +401,13 @@ static int igt_wedged_reset(void *arg)
 
 	i915_gem_set_wedged(i915);
 
-	GEM_BUG_ON(!i915_terminally_wedged(&i915->gpu_error));
+	GEM_BUG_ON(!i915_is_wedged(i915));
 	i915_reset(i915, ALL_ENGINES, NULL);
 
 	intel_runtime_pm_put(i915, wakeref);
 	igt_global_reset_unlock(i915);
 
-	return i915_terminally_wedged(&i915->gpu_error) ? -EIO : 0;
+	return i915_is_wedged(i915) ? -EIO : 0;
 }
 
 static bool wait_for_idle(struct intel_engine_cs *engine)
@@ -529,7 +529,7 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
 			break;
 	}
 
-	if (i915_terminally_wedged(&i915->gpu_error))
+	if (i915_is_wedged(i915))
 		err = -EIO;
 
 	if (active) {
@@ -843,7 +843,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
 			break;
 	}
 
-	if (i915_terminally_wedged(&i915->gpu_error))
+	if (i915_is_wedged(i915))
 		err = -EIO;
 
 	if (flags & TEST_ACTIVE) {
@@ -969,7 +969,7 @@ static int igt_reset_wait(void *arg)
 	mutex_unlock(&i915->drm.struct_mutex);
 	igt_global_reset_unlock(i915);
 
-	if (i915_terminally_wedged(&i915->gpu_error))
+	if (i915_is_wedged(i915))
 		return -EIO;
 
 	return err;
@@ -1166,7 +1166,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
 unlock:
 	mutex_unlock(&i915->drm.struct_mutex);
 
-	if (i915_terminally_wedged(&i915->gpu_error))
+	if (i915_is_wedged(i915))
 		return -EIO;
 
 	return err;
@@ -1372,7 +1372,7 @@ static int igt_reset_queue(void *arg)
 	mutex_unlock(&i915->drm.struct_mutex);
 	igt_global_reset_unlock(i915);
 
-	if (i915_terminally_wedged(&i915->gpu_error))
+	if (i915_is_wedged(i915))
 		return -EIO;
 
 	return err;
@@ -1552,7 +1552,7 @@ static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
 			i915_request_wait(rq,
 					  I915_WAIT_LOCKED,
 					  MAX_SCHEDULE_TIMEOUT);
-		if (i915_terminally_wedged(&i915->gpu_error))
+		if (i915_is_wedged(i915))
 			err = -EIO;
 	}
 
@@ -1591,7 +1591,7 @@ static int igt_atomic_reset(void *arg)
 
 	/* Flush any requests before we get started and check basics */
 	force_reset(i915);
-	if (i915_terminally_wedged(&i915->gpu_error))
+	if (i915_is_wedged(i915))
 		goto unlock;
 
 	if (intel_has_gpu_reset(i915)) {
@@ -1665,7 +1665,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
 	if (!intel_has_gpu_reset(i915))
 		return 0;
 
-	if (i915_terminally_wedged(&i915->gpu_error))
+	if (i915_is_wedged(i915))
 		return -EIO; /* we're long past hope of a successful reset */
 
 	wakeref = intel_runtime_pm_get(i915);
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 58144e024751..2a34db172903 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -895,7 +895,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
 	if (!HAS_EXECLISTS(i915))
 		return 0;
 
-	if (i915_terminally_wedged(&i915->gpu_error))
+	if (i915_is_wedged(i915))
 		return 0;
 
 	return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index fb479a2c04fb..77b76ab868ee 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -181,7 +181,7 @@ static int check_whitelist(struct i915_gem_context *ctx,
 	err = 0;
 	igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */
 		err = i915_gem_object_set_to_cpu_domain(results, false);
-	if (i915_terminally_wedged(&ctx->i915->gpu_error))
+	if (i915_is_wedged(ctx->i915))
 		err = -EIO;
 	if (err)
 		goto out_put;
@@ -510,7 +510,7 @@ int intel_workarounds_live_selftests(struct drm_i915_private *i915)
 	};
 	int err;
 
-	if (i915_terminally_wedged(&i915->gpu_error))
+	if (i915_is_wedged(i915))
 		return 0;
 
 	mutex_lock(&i915->drm.struct_mutex);
-- 
2.20.1


