[Intel-gfx] [PATCH 20/70] drm/i915: Limit ring synchronisation (sw semaphores) RPS boosts

Chris Wilson <chris@chris-wilson.co.uk>
Tue Apr 7 08:20:44 PDT 2015


Ring switches can occur many times per frame and are often outside
the application's control, causing frequent RPS boosting for no
practical benefit. Treat the sw semaphore synchronisation as a
separate client and only allow it to boost once per busy/idle cycle.
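
The gating works by treating the semaphore path as just another boost
client: a client may boost only while it is not already marked as
boosted, and every client is re-armed when the GPU goes idle. Below is
a minimal standalone sketch of that once-per-busy/idle-cycle guard; it
is illustrative only, and the names rps_client, rps_boost() and
rps_idle() are made up for the example rather than taken from the
driver (in the patch itself the gate is list membership, cf. the
list_empty(&file_priv->rps_boost) check in the debugfs hunk):

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-in for a boost client such as
     * dev_priv->rps.semaphores: it may request an RPS boost at
     * most once per busy/idle cycle. */
    struct rps_client {
            bool boosted;           /* already boosted this cycle? */
            unsigned int boosts;    /* lifetime count, cf. rps_boosts */
    };

    /* Boost only if this client has not boosted since the last idle. */
    static void rps_boost(struct rps_client *client)
    {
            if (client->boosted)
                    return;         /* quell repeated ring-switch boosts */
            client->boosted = true;
            client->boosts++;
            printf("boosting to max frequency\n");
    }

    /* On idle, re-arm the client for the next busy cycle. */
    static void rps_idle(struct rps_client *client)
    {
            client->boosted = false;
    }

    int main(void)
    {
            struct rps_client semaphores = {0};

            rps_boost(&semaphores); /* first ring switch: boosts */
            rps_boost(&semaphores); /* further switches: no-ops */
            rps_idle(&semaphores);  /* GPU idles: re-arm */
            rps_boost(&semaphores); /* next busy cycle: boosts again */
            return 0;
    }

Routing __i915_wait_request() through &i915->rps.semaphores (see the
i915_gem.c hunk below) makes ring synchronisation share this per-client
accounting instead of boosting unconditionally on every ring switch.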

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_debugfs.c |  1 +
 drivers/gpu/drm/i915/i915_drv.h     | 34 ++++++++++++++++++----------------
 drivers/gpu/drm/i915/i915_gem.c     |  7 +++++--
 drivers/gpu/drm/i915/intel_pm.c     |  1 +
 4 files changed, 25 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 5da74b46e202..c8fe548af41d 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2296,6 +2296,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 			   list_empty(&file_priv->rps_boost) ? "" : ", active");
 		rcu_read_unlock();
 	}
+	seq_printf(m, "Semaphore boosts: %d\n", dev_priv->rps.semaphores.rps_boosts);
 	seq_printf(m, "Kernel boosts: %d\n", dev_priv->rps.boosts);
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d35778797ef0..057a1346e81f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -268,6 +268,22 @@ struct drm_i915_private;
 struct i915_mm_struct;
 struct i915_mmu_object;
 
+struct drm_i915_file_private {
+	struct drm_i915_private *dev_priv;
+	struct drm_file *file;
+
+	struct {
+		spinlock_t lock;
+		struct list_head request_list;
+	} mm;
+	struct idr context_idr;
+
+	struct list_head rps_boost;
+	struct intel_engine_cs *bsd_ring;
+
+	unsigned rps_boosts;
+};
+
 enum intel_dpll_id {
 	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
 	/* real shared dpll ids must be >= 0 */
@@ -1047,6 +1063,8 @@ struct intel_gen6_power_mgmt {
 	struct list_head clients;
 	unsigned boosts;
 
+	struct drm_i915_file_private semaphores;
+
 	/* manual wa residency calculations */
 	struct intel_rps_ei up_ei, down_ei;
 
@@ -2185,22 +2203,6 @@ static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
  * a later patch when the call to i915_seqno_passed() is obsoleted...
  */
 
-struct drm_i915_file_private {
-	struct drm_i915_private *dev_priv;
-	struct drm_file *file;
-
-	struct {
-		spinlock_t lock;
-		struct list_head request_list;
-	} mm;
-	struct idr context_idr;
-
-	struct list_head rps_boost;
-	struct intel_engine_cs *bsd_ring;
-
-	unsigned rps_boosts;
-};
-
 /*
  * A command that requires special handling by the command parser.
  */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a32a84598fac..3d31ff11fbef 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3045,9 +3045,12 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 		return ret;
 
 	if (!i915_semaphore_is_enabled(obj->base.dev)) {
+		struct drm_i915_private *i915 = to_i915(obj->base.dev);
 		ret = __i915_wait_request(rq,
-					  atomic_read(&to_i915(obj->base.dev)->gpu_error.reset_counter),
-					  to_i915(obj->base.dev)->mm.interruptible, NULL, NULL);
+					  atomic_read(&i915->gpu_error.reset_counter),
+					  i915->mm.interruptible,
+					  NULL,
+					  &i915->rps.semaphores);
 		if (ret)
 			return ret;
 
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d3f4e9593db1..3e274cf3adaa 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -6827,6 +6827,7 @@ void intel_pm_setup(struct drm_device *dev)
 	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
 			  intel_gen6_powersave_work);
 	INIT_LIST_HEAD(&dev_priv->rps.clients);
+	INIT_LIST_HEAD(&dev_priv->rps.semaphores.rps_boost);
 
 	dev_priv->pm.suspended = false;
 }
-- 
2.1.4


