[PATCH 11/14] drm/i915/gt: Avoid touching rps directly from sysfs entry points

Chris Wilson <chris@chris-wilson.co.uk>
Tue Dec 31 17:32:04 UTC 2019


Defer the adjustment of rps from the sysfs entry points to the worker,
so that we can trivially coordinate all updates with minimal locking.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Andi Shyti <andi.shyti@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_rps.c | 41 ++++++++++--------------
 drivers/gpu/drm/i915/i915_sysfs.c   | 48 +++++++++++------------------
 2 files changed, 34 insertions(+), 55 deletions(-)
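
For context, a minimal standalone userspace sketch of the pattern this change
moves to (hypothetical names, not i915 code): the store side only records the
new softlimit under the lock and kicks a worker, and the worker is the single
place that clamps and applies the frequency. fake_rps, store_max_freq and
rps_worker below are illustrative stand-ins; the real code uses rps->lock,
schedule_work(&rps->work) and rps_set() as in the hunks that follow.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_rps {
	pthread_mutex_t lock;
	pthread_cond_t kick;
	bool pending;
	unsigned int cur_freq;
	unsigned int min_softlimit;
	unsigned int max_softlimit;
};

static unsigned int clamp_uint(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : v > hi ? hi : v;
}

/* sysfs-store analogue: record the new limit, defer everything else. */
static void store_max_freq(struct fake_rps *rps, unsigned int val)
{
	pthread_mutex_lock(&rps->lock);
	if (val != rps->max_softlimit) {
		rps->max_softlimit = val;
		rps->pending = true;            /* stands in for schedule_work() */
		pthread_cond_signal(&rps->kick);
	}
	pthread_mutex_unlock(&rps->lock);
}

/* Worker analogue: the only path that clamps and "writes the hardware". */
static void *rps_worker(void *arg)
{
	struct fake_rps *rps = arg;

	pthread_mutex_lock(&rps->lock);
	while (!rps->pending)
		pthread_cond_wait(&rps->kick, &rps->lock);
	rps->pending = false;
	rps->cur_freq = clamp_uint(rps->cur_freq,
				   rps->min_softlimit, rps->max_softlimit);
	printf("worker: applying %u MHz\n", rps->cur_freq);
	pthread_mutex_unlock(&rps->lock);
	return NULL;
}

int main(void)
{
	struct fake_rps rps = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.kick = PTHREAD_COND_INITIALIZER,
		.cur_freq = 1100,
		.min_softlimit = 300,
		.max_softlimit = 1200,
	};
	pthread_t tid;

	pthread_create(&tid, NULL, rps_worker, &rps);
	store_max_freq(&rps, 900);      /* worker clamps cur_freq 1100 -> 900 */
	pthread_join(&tid, NULL);
	return 0;
}

The point of the indirection is that every path changing the softlimits does so
under the same lock, while the clamp and the register writes happen in exactly
one place, the worker; the sysfs hunks below reduce to that shape via
rps_update().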

diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index f232036c3c7a..58556672203a 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -600,6 +600,7 @@ static void rps_set_power(struct intel_rps *rps, int new_power)
 
 static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val)
 {
+	struct intel_uncore *uncore = rps_to_uncore(rps);
 	int new_power;
 
 	new_power = rps->power.mode;
@@ -636,6 +637,14 @@ static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val)
 		new_power = HIGH_POWER;
 	rps_set_power(rps, new_power);
 	mutex_unlock(&rps->power.mutex);
+
+	/*
+	 * Make sure we continue to get interrupts
+	 * until we hit the minimum or maximum frequencies.
+	 */
+
+	set(uncore, GEN6_RP_INTERRUPT_LIMITS, rps_limits(rps, val));
+	set(uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, val));
 }
 
 void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive)
@@ -718,32 +727,32 @@ void intel_rps_unpark(struct intel_rps *rps)
 	 * Use the user's desired frequency as a guide, but for better
 	 * performance, jump directly to RPe as our starting frequency.
 	 */
-	mutex_lock(&rps->lock);
-	rps->active = true;
 	freq = max(rps->cur_freq, rps->efficient_freq),
 	freq = clamp(freq, rps->min_freq_softlimit, rps->max_freq_softlimit);
-	intel_rps_set(rps, freq);
+	rps_set(rps, freq, true);
 	rps->last_adj = 0;
-	mutex_unlock(&rps->lock);
 
 	if (INTEL_GEN(rps_to_i915(rps)) >= 6)
 		rps_enable_interrupts(rps);
 
 	if (IS_GEN(rps_to_i915(rps), 5))
 		gen5_rps_update(rps);
+
+	WRITE_ONCE(rps->active, true);
 }
 
 void intel_rps_park(struct intel_rps *rps)
 {
 	struct drm_i915_private *i915 = rps_to_i915(rps);
 
-	if (!rps->enabled)
+	if (!rps->active)
 		return;
 
+	WRITE_ONCE(rps->active, false);
+
 	if (INTEL_GEN(i915) >= 6)
 		rps_disable_interrupts(rps);
 
-	rps->active = false;
 	if (rps->last_freq <= rps->idle_freq)
 		return;
 
@@ -800,19 +809,6 @@ int intel_rps_set(struct intel_rps *rps, u8 val)
 		err = rps_set(rps, val, true);
 		if (err)
 			return err;
-
-		/*
-		 * Make sure we continue to get interrupts
-		 * until we hit the minimum or maximum frequencies.
-		 */
-		if (INTEL_GEN(rps_to_i915(rps)) >= 6) {
-			struct intel_uncore *uncore = rps_to_uncore(rps);
-
-			set(uncore,
-			    GEN6_RP_INTERRUPT_LIMITS, rps_limits(rps, val));
-
-			set(uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, val));
-		}
 	}
 
 	rps->cur_freq = val;
@@ -1455,14 +1451,10 @@ static void rps_work(struct work_struct *work)
 	client_boost = atomic_read(&rps->num_waiters);
 	spin_unlock_irq(&gt->irq_lock);
 
-	/* Make sure we didn't queue anything we're not going to process. */
-	if ((pm_iir & rps->pm_events) == 0 && !client_boost)
-		goto out;
+	pm_iir |= vlv_wa_c0_ei(rps, pm_iir);
 
 	mutex_lock(&rps->lock);
 
-	pm_iir |= vlv_wa_c0_ei(rps, pm_iir);
-
 	adj = rps->last_adj;
 	new_freq = rps->cur_freq;
 	min = rps->min_freq_softlimit;
@@ -1527,7 +1519,6 @@ static void rps_work(struct work_struct *work)
 
 	mutex_unlock(&rps->lock);
 
-out:
 	spin_lock_irq(&gt->irq_lock);
 	gen6_gt_pm_unmask_irq(gt, rps->pm_events);
 	spin_unlock_irq(&gt->irq_lock);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index ad2b1b833d7b..9d875fc24b00 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -256,6 +256,14 @@ static const struct bin_attribute dpf_attrs_1 = {
 	.private = (void *)1
 };
 
+static void rps_update(struct intel_rps *rps)
+{
+	rps->last_adj = 0;
+
+	if (rps->active)
+		schedule_work(&rps->work);
+}
+
 static ssize_t gt_act_freq_mhz_show(struct device *kdev,
 				    struct device_attribute *attr, char *buf)
 {
@@ -291,7 +299,6 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 	struct intel_rps *rps = &dev_priv->gt.rps;
-	bool boost = false;
 	ssize_t ret;
 	u32 val;
 
@@ -307,11 +314,10 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
 	mutex_lock(&rps->lock);
 	if (val != rps->boost_freq) {
 		rps->boost_freq = val;
-		boost = atomic_read(&rps->num_waiters);
+		if (atomic_read(&rps->num_waiters))
+			rps_update(rps);
 	}
 	mutex_unlock(&rps->lock);
-	if (boost)
-		schedule_work(&rps->work);
 
 	return count;
 }
@@ -362,22 +368,13 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 		DRM_DEBUG("User requested overclocking to %d\n",
 			  intel_gpu_freq(rps, val));
 
-	rps->max_freq_softlimit = val;
-
-	val = clamp_t(int, rps->cur_freq,
-		      rps->min_freq_softlimit,
-		      rps->max_freq_softlimit);
-
-	/*
-	 * We still need *_set_rps to process the new max_delay and
-	 * update the interrupt limits and PMINTRMSK even though
-	 * frequency request may be unchanged.
-	 */
-	intel_rps_set(rps, val);
+	if (val != rps->max_freq_softlimit) {
+		rps->max_freq_softlimit = val;
+		rps_update(rps);
+	}
 
 unlock:
 	mutex_unlock(&rps->lock);
-
 	return ret ?: count;
 }
 
@@ -413,22 +410,13 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 		goto unlock;
 	}
 
-	rps->min_freq_softlimit = val;
-
-	val = clamp_t(int, rps->cur_freq,
-		      rps->min_freq_softlimit,
-		      rps->max_freq_softlimit);
-
-	/*
-	 * We still need *_set_rps to process the new min_delay and
-	 * update the interrupt limits and PMINTRMSK even though
-	 * frequency request may be unchanged.
-	 */
-	intel_rps_set(rps, val);
+	if (val != rps->min_freq_softlimit) {
+		rps->min_freq_softlimit = val;
+		rps_update(rps);
+	}
 
 unlock:
 	mutex_unlock(&rps->lock);
-
 	return ret ?: count;
 }
 
-- 
2.25.0.rc0


