[Intel-gfx] [PATCH 28/36] drm/i915: Enabling rc6 and rps have different requirements, so separate them

Chris Wilson <chris@chris-wilson.co.uk>
Wed Mar 14 09:37:40 UTC 2018


On Ironlake, we are required not to enable rc6 until the GPU is loaded
with a valid context; after that point it can start to use a powersaving
context for rc6. This seems a reasonable requirement to impose on all
generations, as we are already priming the system by loading a context on
resume. We can then simply delay enabling rc6 until we know the GPU is
awake.
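
In outline, the new load_power_context() helper added below enforces
that ordering. A minimal sketch (the annotations are illustrative; the
intel_gt_pm_* entry points are those introduced earlier in this series):

	int err;

	/* Start from a known state with all powersaving disabled. */
	intel_gt_pm_sanitize(i915);

	/* rps has no context requirement, so enable it immediately. */
	intel_gt_pm_enable_rps(i915);

	/* Prime the GPU with the kernel context and flush it to memory. */
	err = i915_gem_switch_to_kernel_context(i915);
	if (!err)
		err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);

	if (!err)
		intel_gt_pm_enable_rc6(i915); /* a valid context is now resident */
	else
		intel_gt_pm_sanitize(i915); /* on error, back to everything off */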

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.c      |  1 +
 drivers/gpu/drm/i915/i915_gem.c      | 43 ++++++++++++++++++++++++++++--------
 drivers/gpu/drm/i915/i915_request.c  |  3 ---
 drivers/gpu/drm/i915/intel_display.c |  5 -----
 drivers/gpu/drm/i915/intel_gt_pm.c   |  2 ++
 5 files changed, 37 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index db88b8c3c4ae..11eaaf679450 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -632,6 +632,7 @@ static void i915_gem_fini(struct drm_i915_private *dev_priv)
 	i915_gem_drain_workqueue(dev_priv);
 
 	mutex_lock(&dev_priv->drm.struct_mutex);
+	intel_gt_pm_fini(dev_priv);
 	intel_uc_fini_hw(dev_priv);
 	intel_uc_fini(dev_priv);
 	i915_gem_cleanup_engines(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b9c7b21e5cc8..8a5bf1e26515 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3165,10 +3165,12 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
 
 	i915_gem_restore_fences(dev_priv);
 
-	if (dev_priv->gt.awake) {
-		intel_gt_pm_sanitize(dev_priv);
-		intel_gt_pm_enable_rps(dev_priv);
+	if (dev_priv->gt_pm.rc6.enabled) {
+		dev_priv->gt_pm.rc6.enabled = false;
 		intel_gt_pm_enable_rc6(dev_priv);
+	}
+
+	if (dev_priv->gt.awake) {
 		if (INTEL_GEN(dev_priv) >= 6)
 			gen6_rps_busy(dev_priv);
 	}
@@ -3283,9 +3285,35 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 		i915_gem_reset_finish_engine(engine);
 	}
 
+	intel_gt_pm_sanitize(i915);
+
 	wake_up_all(&i915->gpu_error.reset_queue);
 }
 
+static int load_power_context(struct drm_i915_private *i915)
+{
+	int err;
+
+	intel_gt_pm_sanitize(i915);
+	intel_gt_pm_enable_rps(i915);
+
+	err = i915_gem_switch_to_kernel_context(i915);
+	if (err)
+		goto err;
+
+	err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
+	if (err)
+		goto err;
+
+	intel_gt_pm_enable_rc6(i915);
+
+	return 0;
+
+err:
+	intel_gt_pm_sanitize(i915);
+	return err;
+}
+
 bool i915_gem_unset_wedged(struct drm_i915_private *i915)
 {
 	struct i915_gem_timeline *tl;
@@ -5007,7 +5035,7 @@ void i915_gem_resume(struct drm_i915_private *i915)
 	intel_uc_resume(i915);
 
 	/* Always reload a context for powersaving. */
-	if (i915_gem_switch_to_kernel_context(i915))
+	if (load_power_context(i915))
 		goto err_wedged;
 
 out_unlock:
@@ -5194,11 +5222,8 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
 			goto err_active;
 	}
 
-	err = i915_gem_switch_to_kernel_context(i915);
-	if (err)
-		goto err_active;
-
-	err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
+	/* Flush the default context image to memory, and enable powersaving. */
+	err = load_power_context(i915);
 	if (err)
 		goto err_active;
 
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 624c7cd207d2..6b589cffd00e 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -274,9 +274,6 @@ static void mark_busy(struct drm_i915_private *i915)
 	if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */
 		i915->gt.epoch = 1;
 
-	intel_gt_pm_enable_rps(i915);
-	intel_gt_pm_enable_rc6(i915);
-
 	i915_update_gfx_val(i915);
 	if (INTEL_GEN(i915) >= 6)
 		gen6_rps_busy(i915);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 892c274eb47b..00e7f61fa8df 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -15426,9 +15426,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
 	flush_work(&dev_priv->atomic_helper.free_work);
 	WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
 
-	intel_gt_pm_disable_rps(dev_priv);
-	intel_gt_pm_disable_rc6(dev_priv);
-
 	/*
 	 * Interrupts and polling as the first thing to avoid creating havoc.
 	 * Too much stuff here (turning of connectors, ...) would
@@ -15456,8 +15453,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
 
 	intel_cleanup_overlay(dev_priv);
 
-	intel_gt_pm_fini(dev_priv);
-
 	intel_teardown_gmbus(dev_priv);
 
 	destroy_workqueue(dev_priv->modeset_wq);
diff --git a/drivers/gpu/drm/i915/intel_gt_pm.c b/drivers/gpu/drm/i915/intel_gt_pm.c
index c5d0382c934d..883f442ed41e 100644
--- a/drivers/gpu/drm/i915/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/intel_gt_pm.c
@@ -2615,6 +2615,8 @@ void intel_gt_pm_disable_rps(struct drm_i915_private *dev_priv)
 
 void intel_gt_pm_fini(struct drm_i915_private *dev_priv)
 {
+	intel_gt_pm_sanitize(dev_priv);
+
 	if (IS_VALLEYVIEW(dev_priv))
 		valleyview_cleanup_gt_powersave(dev_priv);
 
-- 
2.16.2


