[PATCH 079/123] drm/i915: Simplify rc6/rps enabling

Chris Wilson chris at chris-wilson.co.uk
Thu Oct 11 20:21:01 UTC 2018


Since we know that whenever the GT is awake rc6 and rps are enabled (if
available), we can remove the individual enabled tracking and tie the
enabling to the gen6_rps_busy()/gen6_rps_idle() entry points (now renamed
intel_gt_pm_unpark() and intel_gt_pm_park()).

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_debugfs.c  |   6 +-
 drivers/gpu/drm/i915/i915_drv.c      |   3 -
 drivers/gpu/drm/i915/i915_gem.c      |  18 +-
 drivers/gpu/drm/i915/i915_sysfs.c    |   6 +-
 drivers/gpu/drm/i915/intel_display.c |   4 +-
 drivers/gpu/drm/i915/intel_gt_pm.c   | 300 +++++++++------------------
 drivers/gpu/drm/i915/intel_gt_pm.h   |  25 +--
 7 files changed, 121 insertions(+), 241 deletions(-)
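
Note: below is a minimal, standalone sketch of the tracking model this patch
moves to - a single rps->active flag flipped only in the park/unpark entry
points under rps->lock, which the boost and worker paths then check instead
of the old per-feature enabled bools. The struct, names and the userspace
pthread mutex are illustrative stand-ins, not the i915 code (in particular,
the real intel_rps_boost() only peeks at rps->active with READ_ONCE() and
revalidates in the worker, rather than taking the lock as done here).

	/*
	 * Simplified model of the park/unpark RPS tracking; types and
	 * helpers are stand-ins, not the driver's own.
	 */
	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct rps {
		pthread_mutex_t lock;
		bool active;            /* true only while the GT is unparked */
		unsigned int cur_freq;
		unsigned int idle_freq;
		unsigned int efficient_freq;
	};

	/* GT wakes: mark RPS active and start from the efficient frequency. */
	static void gt_pm_unpark(struct rps *rps)
	{
		pthread_mutex_lock(&rps->lock);
		rps->active = true;
		if (rps->cur_freq < rps->efficient_freq)
			rps->cur_freq = rps->efficient_freq;
		pthread_mutex_unlock(&rps->lock);
	}

	/* GT idles: drop to the idle frequency and mark RPS inactive. */
	static void gt_pm_park(struct rps *rps)
	{
		pthread_mutex_lock(&rps->lock);
		rps->cur_freq = rps->idle_freq;
		rps->active = false;
		pthread_mutex_unlock(&rps->lock);
	}

	/* Boost requests simply bail out while RPS is parked. */
	static void rps_boost(struct rps *rps, unsigned int boost_freq)
	{
		pthread_mutex_lock(&rps->lock);
		if (rps->active && rps->cur_freq < boost_freq)
			rps->cur_freq = boost_freq;
		pthread_mutex_unlock(&rps->lock);
	}

	int main(void)
	{
		struct rps rps = {
			.lock = PTHREAD_MUTEX_INITIALIZER,
			.idle_freq = 300,
			.efficient_freq = 700,
			.cur_freq = 300,
		};

		rps_boost(&rps, 1100);  /* ignored: still parked */
		gt_pm_unpark(&rps);
		rps_boost(&rps, 1100);  /* honoured while unparked */
		gt_pm_park(&rps);

		printf("final freq %u (parked)\n", rps.cur_freq);
		return 0;
	}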

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 97baa93b1a9f..ec65d1780320 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2083,9 +2083,9 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 		}
 	}
 
-	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
 	seq_printf(m, "GPU busy? %s [%d requests]\n",
 		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
+	seq_printf(m, "RPS active? %s\n", yesno(rps->active));
 	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
 	seq_printf(m, "Boosts outstanding? %d\n",
 		   atomic_read(&rps->num_waiters));
@@ -2120,9 +2120,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 		   atomic_read(&rps->boosts));
 	mutex_unlock(&dev->filelist_mutex);
 
-	if (INTEL_GEN(dev_priv) >= 6 &&
-	    rps->enabled &&
-	    dev_priv->gt.active_requests) {
+	if (INTEL_GEN(dev_priv) >= 6 && rps->active) {
 		u32 rpup, rpupei;
 		u32 rpdown, rpdownei;
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index d879c4c9200c..2d823a10ad3c 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2708,9 +2708,6 @@ static int intel_runtime_suspend(struct device *kdev)
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret;
 
-	if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && HAS_RC6(dev_priv))))
-		return -ENODEV;
-
 	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
 		return -ENODEV;
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 728ba0e904b7..0407087fe48a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -169,6 +169,7 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)
 
 	intel_engines_park(i915);
 	i915_timelines_park(i915);
+	intel_gt_pm_park(i915);
 
 	i915_pmu_gt_parked(i915);
 	i915_vma_parked(i915);
@@ -176,11 +177,6 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)
 	assert_kernel_context_is_current(i915);
 
 	wakeref = fetch_and_zero(&i915->gt.awake);
-	GEM_BUG_ON(!wakeref);
-
-	if (INTEL_GEN(i915) >= 6)
-		gen6_rps_idle(i915);
-
 	intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
 
 	return i915->gt.epoch;
@@ -228,9 +224,9 @@ void i915_gem_unpark(struct drm_i915_private *i915)
 	if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */
 		i915->gt.epoch = 1;
 
+	intel_gt_pm_unpark(i915);
 	i915_update_gfx_val(i915);
-	if (INTEL_GEN(i915) >= 6)
-		gen6_rps_busy(i915);
+
 	i915_pmu_gt_unparked(i915);
 
 	intel_engines_unpark(i915);
@@ -493,10 +489,8 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
 	 * forcing the clocks too high for the whole system, we only allow
 	 * each client to waitboost once in a busy period.
 	 */
-	if (rps_client && !i915_request_started(rq)) {
-		if (INTEL_GEN(rq->i915) >= 6)
-			gen6_rps_boost(rq, rps_client);
-	}
+	if (rps_client && !i915_request_started(rq))
+		intel_rps_boost(rq, rps_client);
 
 	timeout = i915_request_wait(rq, flags, timeout);
 
@@ -3356,7 +3350,7 @@ static long wait_for_timeline(struct i915_timeline *tl,
 	 * stalls, so allow the gpu to boost to maximum clocks.
 	 */
 	if (flags & I915_WAIT_FOR_IDLE_BOOST)
-		gen6_rps_boost(rq, NULL);
+		intel_rps_boost(rq, NULL);
 
 	timeout = i915_request_wait(rq, flags, timeout);
 	i915_request_put(rq);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 26424d6f16ab..e0f962088c6a 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -378,7 +378,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 			  intel_gpu_freq(dev_priv, val));
 
 	rps->max_freq_softlimit = val;
-	schedule_work(&rps->work);
+	if (rps->active)
+		schedule_work(&rps->work);
 
 unlock:
 	mutex_unlock(&rps->lock);
@@ -420,7 +421,8 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 	}
 
 	rps->min_freq_softlimit = val;
-	schedule_work(&rps->work);
+	if (rps->active)
+		schedule_work(&rps->work);
 
 unlock:
 	mutex_unlock(&rps->lock);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 7328a4cb70aa..6ffd2de8be16 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -12988,7 +12988,7 @@ static int do_rps_boost(struct wait_queue_entry *_wait,
 	 * vblank without our intervention, so leave RPS alone.
 	 */
 	if (!i915_request_started(rq))
-		gen6_rps_boost(rq, NULL);
+		intel_rps_boost(rq, NULL);
 	i915_request_put(rq);
 
 	drm_crtc_vblank_put(wait->crtc);
@@ -13006,7 +13006,7 @@ static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
 	if (!dma_fence_is_i915(fence))
 		return;
 
-	if (INTEL_GEN(to_i915(crtc->dev)) < 6)
+	if (!HAS_RPS(to_i915(crtc->dev)))
 		return;
 
 	if (drm_crtc_vblank_get(crtc))
diff --git a/drivers/gpu/drm/i915/intel_gt_pm.c b/drivers/gpu/drm/i915/intel_gt_pm.c
index 9b439eff61dc..11a9122c0c68 100644
--- a/drivers/gpu/drm/i915/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/intel_gt_pm.c
@@ -361,15 +361,11 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *i915, u8 val)
  */
 static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
 {
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
-	if (val != rps->cur_freq) {
+	if (val != dev_priv->gt_pm.rps.cur_freq) {
 		if (INTEL_GEN(dev_priv) >= 9)
-			I915_WRITE(GEN6_RPNSWREQ,
-				   GEN9_FREQUENCY(val));
+			I915_WRITE(GEN6_RPNSWREQ, GEN9_FREQUENCY(val));
 		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-			I915_WRITE(GEN6_RPNSWREQ,
-				   HSW_FREQUENCY(val));
+			I915_WRITE(GEN6_RPNSWREQ, HSW_FREQUENCY(val));
 		else
 			I915_WRITE(GEN6_RPNSWREQ,
 				   GEN6_FREQUENCY(val) |
@@ -386,9 +382,6 @@ static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
 	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
 	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
 
-	rps->cur_freq = val;
-	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
-
 	return 0;
 }
 
@@ -411,48 +404,17 @@ static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
 	gen6_set_rps_thresholds(dev_priv, val);
 	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
 
-	dev_priv->gt_pm.rps.cur_freq = val;
-	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
-
 	return 0;
 }
 
-/*
- * vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
- *
- * If Gfx is Idle, then
- * 1. Forcewake Media well.
- * 2. Request idle freq.
- * 3. Release Forcewake of Media well.
- */
-static void vlv_set_rps_idle(struct drm_i915_private *i915)
+static int __intel_set_rps(struct drm_i915_private *i915, u8 val)
 {
-	struct intel_rps *rps = &i915->gt_pm.rps;
-	u32 val = rps->idle_freq;
-	int err;
-
-	if (rps->cur_freq <= val)
-		return;
-
-	/*
-	 * The punit delays the write of the frequency and voltage until it
-	 * determines the GPU is awake. During normal usage we don't want to
-	 * waste power changing the frequency if the GPU is sleeping (rc6).
-	 * However, the GPU and driver is now idle and we do not want to delay
-	 * switching to minimum voltage (reducing power whilst idle) as we do
-	 * not expect to be woken in the near future and so must flush the
-	 * change by waking the device.
-	 *
-	 * We choose to take the media powerwell (either would do to trick the
-	 * punit into committing the voltage change) as that takes a lot less
-	 * power than the render powerwell.
-	 */
-	intel_uncore_forcewake_get(i915, FORCEWAKE_MEDIA);
-	err = valleyview_set_rps(i915, val);
-	intel_uncore_forcewake_put(i915, FORCEWAKE_MEDIA);
-
-	if (err)
-		DRM_ERROR("Failed to set RPS for idle\n");
+	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+		return valleyview_set_rps(i915, val);
+	else if (INTEL_GEN(i915) >= 6)
+		return gen6_set_rps(i915, val);
+	else
+		return 0;
 }
 
 static int intel_set_rps(struct drm_i915_private *i915, u8 val)
@@ -461,20 +423,20 @@ static int intel_set_rps(struct drm_i915_private *i915, u8 val)
 	int err;
 
 	lockdep_assert_held(&rps->lock);
+	GEM_BUG_ON(!rps->active);
 	GEM_BUG_ON(val > rps->max_freq);
 	GEM_BUG_ON(val < rps->min_freq);
 
-	if (!rps->enabled) {
+	err = __intel_set_rps(i915, val);
+	if (err)
+		return err;
+
+	if (val != rps->cur_freq) {
+		trace_intel_gpu_freq_change(intel_gpu_freq(i915, val));
 		rps->cur_freq = val;
-		return 0;
 	}
 
-	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
-		err = valleyview_set_rps(i915, val);
-	else
-		err = gen6_set_rps(i915, val);
-
-	return err;
+	return 0;
 }
 
 static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
@@ -637,11 +599,7 @@ static void enable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 
-	if (READ_ONCE(rps->interrupts_enabled))
-		return;
-
 	spin_lock_irq(&dev_priv->irq_lock);
-	WARN_ON_ONCE(rps->pm_iir);
 
 	if (IS_VALLEYVIEW(dev_priv))
 		/* WaGsvRC0ResidencyMethod:vlv */
@@ -650,45 +608,19 @@ static void enable_rps_interrupts(struct drm_i915_private *dev_priv)
 		rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
 				  GEN6_PM_RP_DOWN_THRESHOLD |
 				  GEN6_PM_RP_DOWN_TIMEOUT);
-
-	if (INTEL_GEN(dev_priv) >= 11)
-		WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM));
-	else
-		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & rps->pm_events);
-
 	gen6_enable_pm_irq(dev_priv, rps->pm_events);
 
-	rps->interrupts_enabled = true;
 	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
 static void disable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
-	if (!READ_ONCE(rps->interrupts_enabled))
-		return;
-
-	if (WARN_ON_ONCE(IS_GEN11(dev_priv)))
-		return;
-
 	spin_lock_irq(&dev_priv->irq_lock);
-	rps->interrupts_enabled = false;
-
 	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
-
 	gen6_disable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
-
 	spin_unlock_irq(&dev_priv->irq_lock);
-	synchronize_irq(dev_priv->drm.irq);
 
-	/* Now that we will not be generating any more work, flush any
-	 * outstanding tasks. As we are called on the RPS idle path,
-	 * we will reset the GPU to minimum frequencies, so the current
-	 * state of the worker can be discarded.
-	 */
-	cancel_work_sync(&rps->work);
-	gen6_reset_rps_interrupts(dev_priv);
+	synchronize_irq(dev_priv->drm.irq);
 }
 
 static void vlv_c0_read(struct drm_i915_private *dev_priv,
@@ -762,6 +694,9 @@ static void intel_rps_work(struct work_struct *work)
 
 	mutex_lock(&rps->lock);
 
+	if (!rps->active)
+		goto unlock;
+
 	min = rps->min_freq_softlimit;
 	max = rps->max_freq_softlimit;
 	if (client_boost && max < rps->boost_freq)
@@ -818,106 +753,125 @@ static void intel_rps_work(struct work_struct *work)
 		adj = 0;
 	}
 
-	mutex_unlock(&rps->lock);
-
 	if (pm_iir) {
 		spin_lock_irq(&i915->irq_lock);
-		if (rps->interrupts_enabled)
-			gen6_unmask_pm_irq(i915, rps->pm_events);
+		gen6_unmask_pm_irq(i915, rps->pm_events);
 		spin_unlock_irq(&i915->irq_lock);
 		rps->last_adj = adj;
 	}
+
+unlock:
+	mutex_unlock(&rps->lock);
 }
 
 void intel_gt_pm_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
 {
 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 
-	if (pm_iir & rps->pm_events) {
+	if (rps->active && pm_iir & rps->pm_events) {
 		spin_lock(&dev_priv->irq_lock);
 		gen6_mask_pm_irq(dev_priv, pm_iir & rps->pm_events);
-		if (rps->interrupts_enabled) {
-			rps->pm_iir |= pm_iir & rps->pm_events;
-			schedule_work(&rps->work);
-		}
+		rps->pm_iir |= pm_iir & rps->pm_events;
 		spin_unlock(&dev_priv->irq_lock);
+
+		schedule_work(&rps->work);
 	}
 }
 
-void gen6_rps_busy(struct drm_i915_private *dev_priv)
+void intel_gt_pm_unpark(struct drm_i915_private *dev_priv)
 {
 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	u8 freq;
 
 	if (!HAS_RPS(dev_priv))
 		return;
 
+	GEM_BUG_ON(rps->pm_iir);
+	GEM_BUG_ON(rps->active);
+
 	mutex_lock(&rps->lock);
-	if (rps->enabled) {
-		u8 freq;
+	rps->active = true;
 
-		I915_WRITE(GEN6_PMINTRMSK,
-			   gen6_rps_pm_mask(dev_priv, rps->cur_freq));
+	/*
+	 * Use the user's desired frequency as a guide, but for better
+	 * performance, jump directly to RPe as our starting frequency.
+	 */
+	freq = max(rps->cur_freq, rps->efficient_freq);
+	if (intel_set_rps(dev_priv,
+			  clamp(freq,
+				rps->min_freq_softlimit,
+				rps->max_freq_softlimit)))
+		DRM_DEBUG_DRIVER("Failed to set busy frequency\n");
 
-		enable_rps_interrupts(dev_priv);
-		memset(&rps->ei, 0, sizeof(rps->ei));
+	rps->last_adj = 0;
 
-		/*
-		 * Use the user's desired frequency as a guide, but for better
-		 * performance, jump directly to RPe as our starting frequency.
-		 */
-		freq = max(rps->cur_freq, rps->efficient_freq);
-
-		if (intel_set_rps(dev_priv,
-				  clamp(freq,
-					rps->min_freq_softlimit,
-					rps->max_freq_softlimit)))
-			DRM_DEBUG_DRIVER("Failed to set idle frequency\n");
+	if (INTEL_GEN(dev_priv) >= 6) {
+		memset(&rps->ei, 0, sizeof(rps->ei));
+		enable_rps_interrupts(dev_priv);
 	}
+
 	mutex_unlock(&rps->lock);
 }
 
-void gen6_rps_idle(struct drm_i915_private *dev_priv)
+void intel_gt_pm_park(struct drm_i915_private *dev_priv)
 {
 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 
-	if (!HAS_RPS(dev_priv))
+	if (!rps->active)
 		return;
 
-	/*
-	 * Flush our bottom-half so that it does not race with us
-	 * setting the idle frequency and so that it is bounded by
-	 * our rpm wakeref. And then disable the interrupts to stop any
-	 * futher RPS reclocking whilst we are asleep.
-	 */
+	mutex_lock(&rps->lock);
+
 	disable_rps_interrupts(dev_priv);
 
-	mutex_lock(&rps->lock);
-	if (rps->enabled) {
-		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-			vlv_set_rps_idle(dev_priv);
-		else
-			gen6_set_rps(dev_priv, rps->idle_freq);
-		rps->last_adj = 0;
+	if (rps->cur_freq > rps->idle_freq) {
+		/*
+		 * The punit delays the write of the frequency and voltage
+		 * until it determines the GPU is awake. During normal usage we
+		 * don't want to waste power changing the frequency if the GPU
+		 * is sleeping (rc6).  However, the GPU and driver is now idle
+		 * and we do not want to delay switching to minimum voltage
+		 * (reducing power whilst idle) as we do not expect to be woken
+		 * in the near future and so must flush the change by waking
+		 * the device.
+		 *
+		 * We choose to take the media powerwell (either would do to
+		 * trick the punit into committing the voltage change) as that
+		 * takes a lot less power than the render powerwell.
+		 */
+		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
+		if (__intel_set_rps(dev_priv, rps->idle_freq))
+			DRM_DEBUG_DRIVER("Failed to set idle frequency\n");
+		rps->cur_freq = rps->idle_freq;
+		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
+	}
+
+	if (INTEL_GEN(dev_priv) >= 6) {
 		I915_WRITE(GEN6_PMINTRMSK,
 			   gen6_sanitize_rps_pm_mask(dev_priv, ~0));
 	}
+
+	rps->last_adj = 0;
+	rps->active = false;
 	mutex_unlock(&rps->lock);
+
+	/*
+	 * Now that we will not be generating any more work, flush any
+	 * outstanding tasks. As we are called on the RPS idle path,
+	 * we will reset the GPU to minimum frequencies, so the current
+	 * state of the worker can be discarded.
+	 */
+	cancel_work_sync(&rps->work);
+	gen6_reset_rps_interrupts(dev_priv);
 }
 
-void gen6_rps_boost(struct i915_request *rq, struct intel_rps_client *client)
+void intel_rps_boost(struct i915_request *rq, struct intel_rps_client *client)
 {
 	struct intel_rps *rps = &rq->i915->gt_pm.rps;
 	unsigned long flags;
 	bool boost;
 
-	if (!HAS_RPS(rq->i915))
-		return;
-
-	/*
-	 * This is intentionally racy! We peek at the state here, then
-	 * validate inside the RPS worker.
-	 */
-	if (!rps->enabled)
+	if (!READ_ONCE(rps->active))
 		return;
 
 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
@@ -1129,20 +1083,6 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
 	}
 }
 
-static void reset_rps(struct drm_i915_private *i915,
-		      int (*set)(struct drm_i915_private *, u8))
-{
-	struct intel_rps *rps = &i915->gt_pm.rps;
-	u8 freq = rps->cur_freq;
-
-	/* force a reset */
-	rps->power.mode = -1;
-	rps->cur_freq = -1;
-
-	if (set(i915, freq))
-		DRM_ERROR("Failed to reset RPS to initial values\n");
-}
-
 /* See the Gen9_GT_PM_Programming_Guide doc for the below */
 static void gen9_enable_rps(struct drm_i915_private *dev_priv)
 {
@@ -1164,7 +1104,6 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv)
 	 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
 	 * RP_INTERRUPT_LIMITS & RPNSWREQ registers.
 	 */
-	reset_rps(dev_priv, gen6_set_rps);
 
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
@@ -1334,8 +1273,6 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
 		   GEN6_RP_UP_BUSY_AVG |
 		   GEN6_RP_DOWN_IDLE_AVG);
 
-	reset_rps(dev_priv, gen6_set_rps);
-
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
@@ -1435,8 +1372,6 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
 	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
 
-	reset_rps(dev_priv, gen6_set_rps);
-
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
@@ -1953,8 +1888,6 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
 	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
 	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
 
-	reset_rps(dev_priv, valleyview_set_rps);
-
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
@@ -2039,8 +1972,6 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
 	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
 	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
 
-	reset_rps(dev_priv, valleyview_set_rps);
-
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
@@ -2526,11 +2457,7 @@ static void intel_init_emon(struct drm_i915_private *dev_priv)
 void intel_gt_pm_sanitize(struct drm_i915_private *i915)
 {
 	intel_gt_pm_disable_llc(i915);
-
-	i915->gt_pm.rc6.enabled = true; /* force RC6 disabling */
 	intel_gt_pm_disable_rc6(i915);
-
-	i915->gt_pm.rps.enabled = true; /* force RPS disabling */
 	intel_gt_pm_disable_rps(i915);
 
 	if (INTEL_GEN(i915) >= 11)
@@ -2633,9 +2560,6 @@ static void __enable_rc6(struct drm_i915_private *i915)
 {
 	lockdep_assert_held(&i915->gt_pm.rps.lock);
 
-	if (i915->gt_pm.rc6.enabled)
-		return;
-
 	if (IS_CHERRYVIEW(i915))
 		cherryview_enable_rc6(i915);
 	else if (IS_VALLEYVIEW(i915))
@@ -2646,8 +2570,6 @@ static void __enable_rc6(struct drm_i915_private *i915)
 		gen8_enable_rc6(i915);
 	else if (INTEL_GEN(i915) >= 6)
 		gen6_enable_rc6(i915);
-
-	i915->gt_pm.rc6.enabled = true;
 }
 
 void intel_gt_pm_enable_rc6(struct drm_i915_private *i915)
@@ -2666,9 +2588,6 @@ static void __enable_rps(struct drm_i915_private *i915)
 
 	lockdep_assert_held(&rps->lock);
 
-	if (rps->enabled)
-		return;
-
 	if (IS_CHERRYVIEW(i915)) {
 		cherryview_enable_rps(i915);
 	} else if (IS_VALLEYVIEW(i915)) {
@@ -2690,7 +2609,12 @@ static void __enable_rps(struct drm_i915_private *i915)
 	WARN_ON(rps->efficient_freq < rps->min_freq);
 	WARN_ON(rps->efficient_freq > rps->max_freq);
 
-	rps->enabled = true;
+	/* Force a reset */
+	rps->cur_freq = rps->max_freq;
+	rps->power.mode = -1;
+	__intel_set_rps(i915, rps->idle_freq);
+
+	rps->cur_freq = rps->idle_freq;
 }
 
 void intel_gt_pm_enable_rps(struct drm_i915_private *i915)
@@ -2707,11 +2631,7 @@ static void __enable_llc(struct drm_i915_private *i915)
 {
 	lockdep_assert_held(&i915->gt_pm.rps.lock);
 
-	if (i915->gt_pm.llc_pstate.enabled)
-		return;
-
 	gen6_update_ring_freq(i915);
-	i915->gt_pm.llc_pstate.enabled = true;
 }
 
 void intel_gt_pm_enable_llc(struct drm_i915_private *i915)
@@ -2728,9 +2648,6 @@ static void __disable_rc6(struct drm_i915_private *i915)
 {
 	lockdep_assert_held(&i915->gt_pm.rps.lock);
 
-	if (!i915->gt_pm.rc6.enabled)
-		return;
-
 	if (INTEL_GEN(i915) >= 9)
 		gen9_disable_rc6(i915);
 	else if (IS_CHERRYVIEW(i915))
@@ -2739,8 +2656,6 @@ static void __disable_rc6(struct drm_i915_private *i915)
 		valleyview_disable_rc6(i915);
 	else if (INTEL_GEN(i915) >= 6)
 		gen6_disable_rc6(i915);
-
-	i915->gt_pm.rc6.enabled = false;
 }
 
 void intel_gt_pm_disable_rc6(struct drm_i915_private *i915)
@@ -2754,9 +2669,6 @@ static void __disable_rps(struct drm_i915_private *i915)
 {
 	lockdep_assert_held(&i915->gt_pm.rps.lock);
 
-	if (!i915->gt_pm.rps.enabled)
-		return;
-
 	if (INTEL_GEN(i915) >= 9)
 		gen9_disable_rps(i915);
 	else if (IS_CHERRYVIEW(i915))
@@ -2767,8 +2679,6 @@ static void __disable_rps(struct drm_i915_private *i915)
 		gen6_disable_rps(i915);
 	else if (INTEL_GEN(i915) >= 5)
 		ironlake_disable_drps(i915);
-
-	i915->gt_pm.rps.enabled = false;
 }
 
 void intel_gt_pm_disable_rps(struct drm_i915_private *i915)
@@ -2778,21 +2688,9 @@ void intel_gt_pm_disable_rps(struct drm_i915_private *i915)
 	mutex_unlock(&i915->gt_pm.rps.lock);
 }
 
-static void __disable_llc(struct drm_i915_private *i915)
-{
-	lockdep_assert_held(&i915->gt_pm.rps.lock);
-
-	if (!i915->gt_pm.llc_pstate.enabled)
-		return;
-
-	i915->gt_pm.llc_pstate.enabled = false;
-}
-
 void intel_gt_pm_disable_llc(struct drm_i915_private *i915)
 {
-	mutex_lock(&i915->gt_pm.rps.lock);
-	__disable_llc(i915);
-	mutex_unlock(&i915->gt_pm.rps.lock);
+	/* Nothing to do here. */
 }
 
 void intel_gt_pm_fini(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/intel_gt_pm.h b/drivers/gpu/drm/i915/intel_gt_pm.h
index 09cc03173164..7962a47d5a58 100644
--- a/drivers/gpu/drm/i915/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/intel_gt_pm.h
@@ -19,13 +19,10 @@ struct intel_rps_ei {
 
 struct intel_rps {
 	struct mutex lock;
-
-	/*
-	 * work, interrupts_enabled and pm_iir are protected by i915->irq_lock
-	 */
 	struct work_struct work;
-	bool interrupts_enabled;
-	u32 pm_iir;
+
+	bool active;
+	u32 pm_iir; /* protected by i915->irq_lock */
 
 	/* PM interrupt bits that should never be masked */
 	u32 pm_intrmsk_mbz;
@@ -68,7 +65,6 @@ struct intel_rps {
 		u8 down_threshold; /* Current %busy required to downclock */
 	} power;
 
-	bool enabled;
 	atomic_t num_waiters;
 	atomic_t boosts;
 
@@ -77,19 +73,13 @@ struct intel_rps {
 };
 
 struct intel_rc6 {
-	bool enabled;
 	u64 prev_hw_residency[4];
 	u64 cur_residency[4];
 };
 
-struct intel_llc_pstate {
-	bool enabled;
-};
-
 struct intel_gt_pm {
-	struct intel_rps rps;
 	struct intel_rc6 rc6;
-	struct intel_llc_pstate llc_pstate;
+	struct intel_rps rps;
 
 	u32 imr;
 	u32 ier;
@@ -113,11 +103,12 @@ void intel_gt_pm_disable_rc6(struct drm_i915_private *i915);
 void intel_gt_pm_enable_llc(struct drm_i915_private *i915);
 void intel_gt_pm_disable_llc(struct drm_i915_private *i915);
 
+void intel_gt_pm_unpark(struct drm_i915_private *i915);
+void intel_gt_pm_park(struct drm_i915_private *i915);
+
 void intel_gt_pm_irq_handler(struct drm_i915_private *i915, u32 pm_iir);
 
-void gen6_rps_busy(struct drm_i915_private *i915);
-void gen6_rps_idle(struct drm_i915_private *i915);
-void gen6_rps_boost(struct i915_request *rq, struct intel_rps_client *rps);
+void intel_rps_boost(struct i915_request *rq, struct intel_rps_client *rps);
 
 void intel_rps_mark_interactive(struct drm_i915_private *i915,
 				bool interactive);
-- 
2.19.1
