[PATCH 8/8] drm/i915: Lift intel_engines_resume() to callers

Chris Wilson <chris at chris-wilson.co.uk>
Sat Jun 22 12:53:40 UTC 2019


The reset path wants to recover the engines itself, so it only needs to
reinitialise the hardware using i915_gem_init_hw(). Pull the call to
intel_engines_resume() out to the module init/resume paths so that we
can avoid it during reset.

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
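Note for reviewers: after this patch i915_gem_init_hw() only
reinitialises the hardware; request replay moves into intel_gt_resume(),
which the init/resume paths call explicitly, while the reset path
restarts awake engines directly in reset_finish_engine(). Below is a
minimal standalone sketch of the new ordering; it is illustrative only,
with stub types standing in for the real drm_i915_private and the kernel
entry points (nothing here is kernel code):

#include <stdio.h>

struct i915 { int wedged; };

static int i915_gem_init_hw(struct i915 *i915)
{
	if (i915->wedged)
		return -5; /* stands in for -EIO on a terminally wedged GPU */
	printf("i915_gem_init_hw: hardware reinitialised\n");
	return 0; /* no request replay in here any more */
}

static int intel_engines_resume(struct i915 *i915)
{
	(void)i915;
	printf("intel_engines_resume: pending requests replayed\n");
	return 0;
}

static int intel_gt_resume(struct i915 *i915)
{
	printf("intel_gt_resume: stale engine contexts reset\n");
	return intel_engines_resume(i915); /* engine resume now lives here */
}

static int i915_gem_resume(struct i915 *i915)
{
	int err;

	err = i915_gem_init_hw(i915); /* hardware first... */
	if (err)
		return err;

	return intel_gt_resume(i915); /* ...only then replay requests */
}

int main(void)
{
	struct i915 i915 = { .wedged = 0 };

	return i915_gem_resume(&i915) ? 1 : 0;
}
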
 drivers/gpu/drm/i915/gem/i915_gem_pm.c |   7 +-
 drivers/gpu/drm/i915/gt/intel_gt_pm.c  |   5 +-
 drivers/gpu/drm/i915/gt/intel_gt_pm.h  |   2 +-
 drivers/gpu/drm/i915/gt/intel_reset.c  |   4 +-
 drivers/gpu/drm/i915/i915_gem.c        | 174 ++++++++++---------------
 5 files changed, 83 insertions(+), 109 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 216aa6a1cb02..52e44b405aaf 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -254,14 +254,15 @@ void i915_gem_resume(struct drm_i915_private *i915)
 	i915_gem_restore_gtt_mappings(i915);
 	i915_gem_restore_fences(i915);
 
+	if (i915_gem_init_hw(i915))
+		goto err_wedged;
+
 	/*
 	 * As we didn't flush the kernel context before suspend, we cannot
 	 * guarantee that the context image is complete. So let's just reset
 	 * it and start again.
 	 */
-	intel_gt_resume(i915);
-
-	if (i915_gem_init_hw(i915))
+	if (intel_gt_resume(i915))
 		goto err_wedged;
 
 	intel_uc_resume(i915);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 6062840b5b46..f4681f43b574 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -5,6 +5,7 @@
  */
 
 #include "i915_drv.h"
+#include "intel_engine_pm.h"
 #include "intel_gt_pm.h"
 #include "intel_pm.h"
 #include "intel_wakeref.h"
@@ -118,7 +119,7 @@ void intel_gt_sanitize(struct drm_i915_private *i915, bool force)
 		intel_engine_reset(engine, false);
 }
 
-void intel_gt_resume(struct drm_i915_private *i915)
+int intel_gt_resume(struct drm_i915_private *i915)
 {
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
@@ -140,4 +141,6 @@ void intel_gt_resume(struct drm_i915_private *i915)
 		if (ce)
 			ce->ops->reset(ce);
 	}
+
+	return intel_engines_resume(i915);
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index b6049a907890..1fbfe0a6c039 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -23,6 +23,6 @@ void intel_gt_pm_put(struct drm_i915_private *i915);
 void intel_gt_pm_init_early(struct intel_gt *gt);
 
 void intel_gt_sanitize(struct drm_i915_private *i915, bool force);
-void intel_gt_resume(struct drm_i915_private *i915);
+int intel_gt_resume(struct drm_i915_private *i915);
 
 #endif /* INTEL_GT_PM_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 3d2043896bcc..7f23d6e440f7 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -761,8 +761,10 @@ static int gt_reset(struct drm_i915_private *i915,
 static void reset_finish_engine(struct intel_engine_cs *engine, bool awake)
 {
 	engine->reset.finish(engine);
-	if (awake)
+	if (awake) {
+		engine->resume(engine);
 		intel_engine_pm_put(engine);
+	}
 
 	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
 }
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e59be5c05e1b..06920c8a1856 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -46,7 +46,6 @@
 #include "gem/i915_gem_ioctls.h"
 #include "gem/i915_gem_pm.h"
 #include "gem/i915_gemfs.h"
-#include "gt/intel_engine_pm.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_pm.h"
 #include "gt/intel_mocs.h"
@@ -1192,15 +1191,19 @@ static void init_unused_rings(struct intel_gt *gt)
 	}
 }
 
-static int init_hw(struct intel_gt *gt)
+int i915_gem_init_hw(struct drm_i915_private *i915)
 {
-	struct drm_i915_private *i915 = gt->i915;
-	struct intel_uncore *uncore = gt->uncore;
+	struct intel_uncore *uncore = &i915->uncore;
+	struct intel_gt *gt = &i915->gt;
 	int ret;
 
+	BUG_ON(!i915->kernel_context);
+	ret = i915_terminally_wedged(i915);
+	if (ret)
+		return ret;
+
 	gt->last_init_time = ktime_get();
 
-	/* Double layer security blanket, see i915_gem_init() */
 	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
 
 	if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9)
@@ -1248,51 +1251,10 @@ static int init_hw(struct intel_gt *gt)
 
 	intel_mocs_init_l3cc_table(gt);
 
-	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
-
-	return 0;
-
-out:
-	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
-
-	return ret;
-}
-
-int i915_gem_init_hw(struct drm_i915_private *i915)
-{
-	struct intel_uncore *uncore = &i915->uncore;
-	int ret;
-
-	BUG_ON(!i915->kernel_context);
-	ret = i915_terminally_wedged(i915);
-	if (ret)
-		return ret;
-
-	/* Double layer security blanket, see i915_gem_init() */
-	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
-
-	ret = init_hw(&i915->gt);
-	if (ret)
-		goto err_init;
-
-	/* Only when the HW is re-initialised, can we replay the requests */
-	ret = intel_engines_resume(i915);
-	if (ret)
-		goto err_engines;
-
-	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
-
 	intel_engines_set_scheduler_caps(i915);
 
-	return 0;
-
-err_engines:
-	intel_uc_fini_hw(i915);
-err_init:
+out:
 	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
-
-	intel_engines_set_scheduler_caps(i915);
-
 	return ret;
 }
 
@@ -1449,28 +1411,28 @@ static int intel_engines_verify_workarounds(struct drm_i915_private *i915)
 	return err;
 }
 
-int i915_gem_init(struct drm_i915_private *dev_priv)
+int i915_gem_init(struct drm_i915_private *i915)
 {
 	int ret;
 
 	/* We need to fallback to 4K pages if host doesn't support huge gtt. */
-	if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
-		mkwrite_device_info(dev_priv)->page_sizes =
+	if (intel_vgpu_active(i915) && !intel_vgpu_has_huge_gtt(i915))
+		mkwrite_device_info(i915)->page_sizes =
 			I915_GTT_PAGE_SIZE_4K;
 
-	dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
+	i915->mm.unordered_timeline = dma_fence_context_alloc(1);
 
-	intel_timelines_init(dev_priv);
+	intel_timelines_init(i915);
 
-	ret = i915_gem_init_userptr(dev_priv);
+	ret = i915_gem_init_userptr(i915);
 	if (ret)
 		return ret;
 
-	ret = intel_uc_init_misc(dev_priv);
+	ret = intel_uc_init_misc(i915);
 	if (ret)
 		return ret;
 
-	ret = intel_wopcm_init(&dev_priv->wopcm);
+	ret = intel_wopcm_init(&i915->wopcm);
 	if (ret)
 		goto err_uc_misc;
 
@@ -1480,50 +1442,55 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	 * we hold the forcewake during initialisation these problems
 	 * just magically go away.
 	 */
-	mutex_lock(&dev_priv->drm.struct_mutex);
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+	mutex_lock(&i915->drm.struct_mutex);
+	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
 
-	ret = i915_init_ggtt(dev_priv);
+	ret = i915_init_ggtt(i915);
 	if (ret) {
 		GEM_BUG_ON(ret == -EIO);
 		goto err_unlock;
 	}
 
-	ret = i915_gem_init_scratch(dev_priv,
-				    IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE);
+	ret = i915_gem_init_scratch(i915,
+				    IS_GEN(i915, 2) ? SZ_256K : PAGE_SIZE);
 	if (ret) {
 		GEM_BUG_ON(ret == -EIO);
 		goto err_ggtt;
 	}
 
-	ret = intel_engines_setup(dev_priv);
+	ret = intel_engines_setup(i915);
 	if (ret) {
 		GEM_BUG_ON(ret == -EIO);
 		goto err_unlock;
 	}
 
-	ret = i915_gem_contexts_init(dev_priv);
+	ret = i915_gem_contexts_init(i915);
 	if (ret) {
 		GEM_BUG_ON(ret == -EIO);
 		goto err_scratch;
 	}
 
-	ret = intel_engines_init(dev_priv);
+	ret = intel_engines_init(i915);
 	if (ret) {
 		GEM_BUG_ON(ret == -EIO);
 		goto err_context;
 	}
 
-	intel_init_gt_powersave(dev_priv);
+	intel_init_gt_powersave(i915);
 
-	ret = intel_uc_init(dev_priv);
+	ret = intel_uc_init(i915);
 	if (ret)
 		goto err_pm;
 
-	ret = i915_gem_init_hw(dev_priv);
+	ret = i915_gem_init_hw(i915);
 	if (ret)
 		goto err_uc_init;
 
+	/* Only when the HW is re-initialised, can we replay the requests */
+	ret = intel_gt_resume(i915);
+	if (ret)
+		goto err_init_hw;
+
 	/*
 	 * Despite its name intel_init_clock_gating applies both display
 	 * clock gating workarounds; GT mmio workarounds and the occasional
@@ -1533,28 +1500,28 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	 *
 	 * FIXME: break up the workarounds and apply them at the right time!
 	 */
-	intel_init_clock_gating(dev_priv);
+	intel_init_clock_gating(i915);
 
-	ret = intel_engines_verify_workarounds(dev_priv);
+	ret = intel_engines_verify_workarounds(i915);
 	if (ret)
-		goto err_init_hw;
+		goto err_gt;
 
-	ret = __intel_engines_record_defaults(dev_priv);
+	ret = __intel_engines_record_defaults(i915);
 	if (ret)
-		goto err_init_hw;
+		goto err_gt;
 
 	if (i915_inject_load_failure()) {
 		ret = -ENODEV;
-		goto err_init_hw;
+		goto err_gt;
 	}
 
 	if (i915_inject_load_failure()) {
 		ret = -EIO;
-		goto err_init_hw;
+		goto err_gt;
 	}
 
-	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
+	mutex_unlock(&i915->drm.struct_mutex);
 
 	return 0;
 
@@ -1564,66 +1531,67 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	 * HW as irrevisibly wedged, but keep enough state around that the
 	 * driver doesn't explode during runtime.
 	 */
-err_init_hw:
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+err_gt:
+	mutex_unlock(&i915->drm.struct_mutex);
 
-	i915_gem_set_wedged(dev_priv);
-	i915_gem_suspend(dev_priv);
-	i915_gem_suspend_late(dev_priv);
+	i915_gem_set_wedged(i915);
+	i915_gem_suspend(i915);
+	i915_gem_suspend_late(i915);
 
-	i915_gem_drain_workqueue(dev_priv);
+	i915_gem_drain_workqueue(i915);
 
-	mutex_lock(&dev_priv->drm.struct_mutex);
-	intel_uc_fini_hw(dev_priv);
+	mutex_lock(&i915->drm.struct_mutex);
+err_init_hw:
+	intel_uc_fini_hw(i915);
 err_uc_init:
-	intel_uc_fini(dev_priv);
+	intel_uc_fini(i915);
 err_pm:
 	if (ret != -EIO) {
-		intel_cleanup_gt_powersave(dev_priv);
-		intel_engines_cleanup(dev_priv);
+		intel_cleanup_gt_powersave(i915);
+		intel_engines_cleanup(i915);
 	}
 err_context:
 	if (ret != -EIO)
-		i915_gem_contexts_fini(dev_priv);
+		i915_gem_contexts_fini(i915);
 err_scratch:
-	i915_gem_fini_scratch(dev_priv);
+	i915_gem_fini_scratch(i915);
 err_ggtt:
 err_unlock:
-	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
+	mutex_unlock(&i915->drm.struct_mutex);
 
 err_uc_misc:
-	intel_uc_fini_misc(dev_priv);
+	intel_uc_fini_misc(i915);
 
 	if (ret != -EIO) {
-		i915_gem_cleanup_userptr(dev_priv);
-		intel_timelines_fini(dev_priv);
+		i915_gem_cleanup_userptr(i915);
+		intel_timelines_fini(i915);
 	}
 
 	if (ret == -EIO) {
-		mutex_lock(&dev_priv->drm.struct_mutex);
+		mutex_lock(&i915->drm.struct_mutex);
 
 		/*
 		 * Allow engine initialisation to fail by marking the GPU as
 		 * wedged. But we only want to do this where the GPU is angry,
 		 * for all other failure, such as an allocation failure, bail.
 		 */
-		if (!i915_reset_failed(dev_priv)) {
-			i915_load_error(dev_priv,
+		if (!i915_reset_failed(i915)) {
+			i915_load_error(i915,
 					"Failed to initialize GPU, declaring it wedged!\n");
-			i915_gem_set_wedged(dev_priv);
+			i915_gem_set_wedged(i915);
 		}
 
 		/* Minimal basic recovery for KMS */
-		ret = i915_ggtt_enable_hw(dev_priv);
-		i915_gem_restore_gtt_mappings(dev_priv);
-		i915_gem_restore_fences(dev_priv);
-		intel_init_clock_gating(dev_priv);
+		ret = i915_ggtt_enable_hw(i915);
+		i915_gem_restore_gtt_mappings(i915);
+		i915_gem_restore_fences(i915);
+		intel_init_clock_gating(i915);
 
-		mutex_unlock(&dev_priv->drm.struct_mutex);
+		mutex_unlock(&i915->drm.struct_mutex);
 	}
 
-	i915_gem_drain_freed_objects(dev_priv);
+	i915_gem_drain_freed_objects(i915);
 	return ret;
 }
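
An aside on the reset side of the change: because i915_gem_init_hw() no
longer resumes engines, reset_finish_engine() above restarts each engine
that was held awake across the reset via its engine->resume() hook. A
stub-typed sketch of that shape (illustrative only; the lazy resume of
parked engines on their next wakeup is an assumption, not spelled out in
the patch):

#include <stdbool.h>
#include <stdio.h>

struct engine {
	const char *name;
	void (*resume)(struct engine *engine);
};

static void engine_resume(struct engine *engine)
{
	printf("%s: restarted after reset\n", engine->name);
}

/*
 * Mirrors reset_finish_engine(): only engines that were awake across
 * the reset are restarted here; a parked engine is presumed to be
 * restarted on its next wakeup instead.
 */
static void reset_finish_engine(struct engine *engine, bool awake)
{
	if (awake)
		engine->resume(engine);
}

int main(void)
{
	struct engine rcs0 = { .name = "rcs0", .resume = engine_resume };

	reset_finish_engine(&rcs0, true);
	return 0;
}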
 
-- 
2.20.1


