[PATCH 8/8] drm/i915: Lift intel_engines_resume() to callers

Chris Wilson <chris@chris-wilson.co.uk>
Sat Jun 22 12:26:39 UTC 2019


Since the reset path wants to recover the engines itself, it only needs
to reinitialise the hardware using i915_gem_init_hw(). Pull the call to
intel_engines_resume() out to the module init/resume paths so that we
can avoid it during reset.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gem/i915_gem_pm.c |   3 +
 drivers/gpu/drm/i915/i915_gem.c        | 173 ++++++++++---------------
 2 files changed, 74 insertions(+), 102 deletions(-)
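
A quick sketch of the resulting bringup order, for reviewers. This is
illustrative only and not part of the patch; example_bringup() is a
made-up name standing in for the init/resume callers:

static int example_bringup(struct drm_i915_private *i915)
{
	int err;

	/* Reprogram the hardware; this step is shared with reset. */
	err = i915_gem_init_hw(i915);
	if (err)
		return err;

	/*
	 * Only once the HW is reinitialised can we replay the requests.
	 * With this patch the replay becomes the caller's responsibility,
	 * so the reset path can stop after i915_gem_init_hw() and recover
	 * the engines itself.
	 */
	return intel_engines_resume(i915);
}

Both i915_gem_init() and i915_gem_resume() follow this order below.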

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 216aa6a1cb02..b09732a13314 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -264,6 +264,9 @@ void i915_gem_resume(struct drm_i915_private *i915)
 	if (i915_gem_init_hw(i915))
 		goto err_wedged;
 
+	if (intel_engines_resume(i915))
+		goto err_wedged;
+
 	intel_uc_resume(i915);
 
 	/* Always reload a context for powersaving. */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e59be5c05e1b..dbb1584836a8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1192,15 +1192,19 @@ static void init_unused_rings(struct intel_gt *gt)
 	}
 }
 
-static int init_hw(struct intel_gt *gt)
+int i915_gem_init_hw(struct drm_i915_private *i915)
 {
-	struct drm_i915_private *i915 = gt->i915;
-	struct intel_uncore *uncore = gt->uncore;
+	struct intel_uncore *uncore = &i915->uncore;
+	struct intel_gt *gt = &i915->gt;
 	int ret;
 
+	BUG_ON(!i915->kernel_context);
+	ret = i915_terminally_wedged(i915);
+	if (ret)
+		return ret;
+
 	gt->last_init_time = ktime_get();
 
-	/* Double layer security blanket, see i915_gem_init() */
 	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
 
 	if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9)
@@ -1248,51 +1252,10 @@ static int init_hw(struct intel_gt *gt)
 
 	intel_mocs_init_l3cc_table(gt);
 
-	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
-
-	return 0;
-
-out:
-	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
-
-	return ret;
-}
-
-int i915_gem_init_hw(struct drm_i915_private *i915)
-{
-	struct intel_uncore *uncore = &i915->uncore;
-	int ret;
-
-	BUG_ON(!i915->kernel_context);
-	ret = i915_terminally_wedged(i915);
-	if (ret)
-		return ret;
-
-	/* Double layer security blanket, see i915_gem_init() */
-	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
-
-	ret = init_hw(&i915->gt);
-	if (ret)
-		goto err_init;
-
-	/* Only when the HW is re-initialised, can we replay the requests */
-	ret = intel_engines_resume(i915);
-	if (ret)
-		goto err_engines;
-
-	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
-
 	intel_engines_set_scheduler_caps(i915);
 
-	return 0;
-
-err_engines:
-	intel_uc_fini_hw(i915);
-err_init:
+out:
 	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
-
-	intel_engines_set_scheduler_caps(i915);
-
 	return ret;
 }
 
@@ -1449,28 +1412,28 @@ static int intel_engines_verify_workarounds(struct drm_i915_private *i915)
 	return err;
 }
 
-int i915_gem_init(struct drm_i915_private *dev_priv)
+int i915_gem_init(struct drm_i915_private *i915)
 {
 	int ret;
 
 	/* We need to fallback to 4K pages if host doesn't support huge gtt. */
-	if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
-		mkwrite_device_info(dev_priv)->page_sizes =
+	if (intel_vgpu_active(i915) && !intel_vgpu_has_huge_gtt(i915))
+		mkwrite_device_info(i915)->page_sizes =
 			I915_GTT_PAGE_SIZE_4K;
 
-	dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
+	i915->mm.unordered_timeline = dma_fence_context_alloc(1);
 
-	intel_timelines_init(dev_priv);
+	intel_timelines_init(i915);
 
-	ret = i915_gem_init_userptr(dev_priv);
+	ret = i915_gem_init_userptr(i915);
 	if (ret)
 		return ret;
 
-	ret = intel_uc_init_misc(dev_priv);
+	ret = intel_uc_init_misc(i915);
 	if (ret)
 		return ret;
 
-	ret = intel_wopcm_init(&dev_priv->wopcm);
+	ret = intel_wopcm_init(&i915->wopcm);
 	if (ret)
 		goto err_uc_misc;
 
@@ -1480,50 +1443,55 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	 * we hold the forcewake during initialisation these problems
 	 * just magically go away.
 	 */
-	mutex_lock(&dev_priv->drm.struct_mutex);
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+	mutex_lock(&i915->drm.struct_mutex);
+	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
 
-	ret = i915_init_ggtt(dev_priv);
+	ret = i915_init_ggtt(i915);
 	if (ret) {
 		GEM_BUG_ON(ret == -EIO);
 		goto err_unlock;
 	}
 
-	ret = i915_gem_init_scratch(dev_priv,
-				    IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE);
+	ret = i915_gem_init_scratch(i915,
+				    IS_GEN(i915, 2) ? SZ_256K : PAGE_SIZE);
 	if (ret) {
 		GEM_BUG_ON(ret == -EIO);
 		goto err_ggtt;
 	}
 
-	ret = intel_engines_setup(dev_priv);
+	ret = intel_engines_setup(i915);
 	if (ret) {
 		GEM_BUG_ON(ret == -EIO);
 		goto err_unlock;
 	}
 
-	ret = i915_gem_contexts_init(dev_priv);
+	ret = i915_gem_contexts_init(i915);
 	if (ret) {
 		GEM_BUG_ON(ret == -EIO);
 		goto err_scratch;
 	}
 
-	ret = intel_engines_init(dev_priv);
+	ret = intel_engines_init(i915);
 	if (ret) {
 		GEM_BUG_ON(ret == -EIO);
 		goto err_context;
 	}
 
-	intel_init_gt_powersave(dev_priv);
+	intel_init_gt_powersave(i915);
 
-	ret = intel_uc_init(dev_priv);
+	ret = intel_uc_init(i915);
 	if (ret)
 		goto err_pm;
 
-	ret = i915_gem_init_hw(dev_priv);
+	ret = i915_gem_init_hw(i915);
 	if (ret)
 		goto err_uc_init;
 
+	/* Only when the HW is re-initialised, can we replay the requests */
+	ret = intel_engines_resume(i915);
+	if (ret)
+		goto err_init_hw;
+
 	/*
 	 * Despite its name intel_init_clock_gating applies both display
 	 * clock gating workarounds; GT mmio workarounds and the occasional
@@ -1533,28 +1501,28 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	 *
 	 * FIXME: break up the workarounds and apply them at the right time!
 	 */
-	intel_init_clock_gating(dev_priv);
+	intel_init_clock_gating(i915);
 
-	ret = intel_engines_verify_workarounds(dev_priv);
+	ret = intel_engines_verify_workarounds(i915);
 	if (ret)
-		goto err_init_hw;
+		goto err_engines;
 
-	ret = __intel_engines_record_defaults(dev_priv);
+	ret = __intel_engines_record_defaults(i915);
 	if (ret)
-		goto err_init_hw;
+		goto err_engines;
 
 	if (i915_inject_load_failure()) {
 		ret = -ENODEV;
-		goto err_init_hw;
+		goto err_engines;
 	}
 
 	if (i915_inject_load_failure()) {
 		ret = -EIO;
-		goto err_init_hw;
+		goto err_engines;
 	}
 
-	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
+	mutex_unlock(&i915->drm.struct_mutex);
 
 	return 0;
 
@@ -1564,66 +1532,67 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	 * HW as irrevisibly wedged, but keep enough state around that the
 	 * driver doesn't explode during runtime.
 	 */
-err_init_hw:
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+err_engines:
+	mutex_unlock(&i915->drm.struct_mutex);
 
-	i915_gem_set_wedged(dev_priv);
-	i915_gem_suspend(dev_priv);
-	i915_gem_suspend_late(dev_priv);
+	i915_gem_set_wedged(i915);
+	i915_gem_suspend(i915);
+	i915_gem_suspend_late(i915);
 
-	i915_gem_drain_workqueue(dev_priv);
+	i915_gem_drain_workqueue(i915);
 
-	mutex_lock(&dev_priv->drm.struct_mutex);
-	intel_uc_fini_hw(dev_priv);
+	mutex_lock(&i915->drm.struct_mutex);
+err_init_hw:
+	intel_uc_fini_hw(i915);
 err_uc_init:
-	intel_uc_fini(dev_priv);
+	intel_uc_fini(i915);
 err_pm:
 	if (ret != -EIO) {
-		intel_cleanup_gt_powersave(dev_priv);
-		intel_engines_cleanup(dev_priv);
+		intel_cleanup_gt_powersave(i915);
+		intel_engines_cleanup(i915);
 	}
 err_context:
 	if (ret != -EIO)
-		i915_gem_contexts_fini(dev_priv);
+		i915_gem_contexts_fini(i915);
 err_scratch:
-	i915_gem_fini_scratch(dev_priv);
+	i915_gem_fini_scratch(i915);
 err_ggtt:
 err_unlock:
-	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
+	mutex_unlock(&i915->drm.struct_mutex);
 
 err_uc_misc:
-	intel_uc_fini_misc(dev_priv);
+	intel_uc_fini_misc(i915);
 
 	if (ret != -EIO) {
-		i915_gem_cleanup_userptr(dev_priv);
-		intel_timelines_fini(dev_priv);
+		i915_gem_cleanup_userptr(i915);
+		intel_timelines_fini(i915);
 	}
 
 	if (ret == -EIO) {
-		mutex_lock(&dev_priv->drm.struct_mutex);
+		mutex_lock(&i915->drm.struct_mutex);
 
 		/*
 		 * Allow engine initialisation to fail by marking the GPU as
 		 * wedged. But we only want to do this where the GPU is angry,
 		 * for all other failure, such as an allocation failure, bail.
 		 */
-		if (!i915_reset_failed(dev_priv)) {
-			i915_load_error(dev_priv,
+		if (!i915_reset_failed(i915)) {
+			i915_load_error(i915,
 					"Failed to initialize GPU, declaring it wedged!\n");
-			i915_gem_set_wedged(dev_priv);
+			i915_gem_set_wedged(i915);
 		}
 
 		/* Minimal basic recovery for KMS */
-		ret = i915_ggtt_enable_hw(dev_priv);
-		i915_gem_restore_gtt_mappings(dev_priv);
-		i915_gem_restore_fences(dev_priv);
-		intel_init_clock_gating(dev_priv);
+		ret = i915_ggtt_enable_hw(i915);
+		i915_gem_restore_gtt_mappings(i915);
+		i915_gem_restore_fences(i915);
+		intel_init_clock_gating(i915);
 
-		mutex_unlock(&dev_priv->drm.struct_mutex);
+		mutex_unlock(&i915->drm.struct_mutex);
 	}
 
-	i915_gem_drain_freed_objects(dev_priv);
+	i915_gem_drain_freed_objects(i915);
 	return ret;
 }
 
-- 
2.20.1


