[Intel-gfx] [PATCH v2 09/12] drm/i915: Sanitize wm latency values for ilk+

Ville Syrjala ville.syrjala at linux.intel.com
Fri Oct 26 19:11:23 UTC 2018


From: Ville Syrjälä <ville.syrjala at linux.intel.com>

For skl+ we disable all wm levels with a decreasing memory latency
value. Let's generalize the same code to work for all platforms,
and let's use it for ilk-bdw as well since those platforms also
read the latency values from a scratch register.

v2: n*10 usec, rebase
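
For illustration only (not part of the patch), here is a minimal
standalone sketch of the sanitization behaviour the new helper is meant
to provide: latencies must be non-decreasing with level, and every level
from the first decreasing value onwards is disabled by zeroing it. The
function name, the 5-level array size, and the sample values below are
made up for the example; only the 0.1 usec unit follows the v2 note
above.

	#include <stdio.h>
	#include <stdint.h>

	/* Latencies are in 0.1 usec units, as per the v2 "n*10 usec" change. */
	static void sanitize_wm_latency(uint16_t *wm, int num_levels)
	{
		int level;

		/* No WM0 latency: assume 5 usec and disable all WM1+ levels */
		if (wm[0] == 0) {
			wm[0] = 5 * 10;
			for (level = 1; level < num_levels; level++)
				wm[level] = 0;
			return;
		}

		/* Disable every level from the first decreasing value onwards */
		for (level = 1; level < num_levels; level++) {
			if (wm[level] < wm[level - 1]) {
				for (; level < num_levels; level++)
					wm[level] = 0;
				break;
			}
		}
	}

	int main(void)
	{
		uint16_t wm[5] = { 20, 40, 30, 50, 60 };	/* level 2 decreases */
		int i;

		sanitize_wm_latency(wm, 5);

		for (i = 0; i < 5; i++)
			printf("WM%d: %u\n", i, wm[i]);	/* prints 20 40 0 0 0 */

		return 0;
	}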

Signed-off-by: Ville Syrjälä <ville.syrjala at linux.intel.com>
---
 drivers/gpu/drm/i915/intel_pm.c | 57 ++++++++++++++++++++++++---------
 1 file changed, 42 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 8820c59c56e4..c72d3fd71b6f 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2825,6 +2825,41 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
 	       PIPE_WM_LINETIME_TIME(linetime);
 }
 
+static void intel_sanitize_wm_latency(struct drm_i915_private *dev_priv,
+				      u16 *wm)
+{
+	int level, num_levels = dev_priv->wm.num_levels;
+
+	/*
+	 * If we don't have WM0 latency, assume
+	 * 5 usec and disable all WM1+ levels.
+	 * 5 usec seems like a safe(ish) fallback value.
+	 */
+	if (WARN(wm[0] == 0, "WM0 memory latency value is zero")) {
+		intel_print_wm_latency(dev_priv, "Bad", wm);
+
+		wm[0] = 5 * 10;
+
+		for (level = 1; level < num_levels; level++)
+			wm[level] = 0;
+		return;
+	}
+
+	/* Make sure the latencies are non-decreasing */
+	for (level = 1; level < num_levels; level++) {
+		if (wm[level] >= wm[level - 1])
+			continue;
+
+		if (WARN(wm[level] != 0,
+			 "Decreasing WM memory latency value(s)"))
+			intel_print_wm_latency(dev_priv, "Bad", wm);
+
+		for (; level < num_levels; level++)
+			wm[level] = 0;
+		break;
+	}
+}
+
 static void ilk_read_wm_latency(struct drm_i915_private *dev_priv,
 				u16 wm[5])
 {
@@ -2934,6 +2969,10 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
 	ilk_fixup_spr_cur_wm_latency(dev_priv, dev_priv->wm.spr_latency);
 	ilk_fixup_spr_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);
 
+	intel_sanitize_wm_latency(dev_priv, dev_priv->wm.pri_latency);
+	intel_sanitize_wm_latency(dev_priv, dev_priv->wm.spr_latency);
+	intel_sanitize_wm_latency(dev_priv, dev_priv->wm.cur_latency);
+
 	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
 	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
 	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
@@ -2945,8 +2984,7 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
 static void skl_read_wm_latency(struct drm_i915_private *dev_priv,
 				u16 wm[8])
 {
-	int level, num_levels = dev_priv->wm.num_levels;
-	int ret, i;
+	int ret;
 	u32 val;
 
 	/* read the first set of memory latencies[0:3] */
@@ -2989,19 +3027,6 @@ static void skl_read_wm_latency(struct drm_i915_private *dev_priv,
 		GEN9_MEM_LATENCY_LEVEL_MASK;
 	wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
 		GEN9_MEM_LATENCY_LEVEL_MASK;
-
-	/*
-	 * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
-	 * need to be disabled. We make sure to sanitize the values out
-	 * of the punit to satisfy this requirement.
-	 */
-	for (level = 1; level < num_levels; level++) {
-		if (wm[level] == 0) {
-			for (i = level + 1; i < num_levels; i++)
-				wm[i] = 0;
-			break;
-		}
-	}
 }
 
 static void skl_fixup_wm_latency_units(struct drm_i915_private *dev_priv,
@@ -3056,6 +3081,8 @@ static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
 
 	skl_wm_latency_wa(dev_priv, dev_priv->wm.pri_latency);
 
+	intel_sanitize_wm_latency(dev_priv, dev_priv->wm.pri_latency);
+
 	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
 	       sizeof(dev_priv->wm.pri_latency));
 	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
-- 
2.18.1


