[Intel-gfx] [PATCH v2 08/12] drm/i915: Split skl+ and ilk+ read_wm_latency()

Ville Syrjala ville.syrjala at linux.intel.com
Fri Oct 26 18:45:10 UTC 2018


From: Ville Syrjälä <ville.syrjala at linux.intel.com>

There's no point in having the skl+ and ilk+ codepaths for reading
the wm latency values in the same function. Split them apart.
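
Roughly, the resulting call flow (a sketch; all function names as
they appear in the diff below):

  ilk_setup_wm_latency()
    -> ilk_read_wm_latency()        /* MCH_SSKPD / SSKPD / MLTR_ILK */
    -> ilk_fixup_wm_latency_units()

  skl_setup_wm_latency()
    -> skl_read_wm_latency()        /* GEN9_PCODE_READ_MEM_LATENCY mailbox */
    -> skl_fixup_wm_latency_units() /* usec -> 0.1 usec units */
    -> skl_wm_latency_wa()          /* WA adjustments, in 0.1 usec units */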

v2: Apply the workaround adjustments as <usec> * 10, since they now
    run after skl_fixup_wm_latency_units() has scaled the latencies
    to 0.1 usec units
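
For illustration, the unit bookkeeping behind the v2 change (a sketch
mirroring skl_fixup_wm_latency_units() and skl_wm_latency_wa() below):

  wm[level] *= 10;  /* punit reports usec; stored as 0.1 usec units */
  wm[0] += 2 * 10;  /* WaWmMemoryReadLatency: +2 usec == +20 units */
  wm[0] += 1 * 10;  /* 16GB DIMM level 0 bump: +1 usec == +10 units */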

Signed-off-by: Ville Syrjälä <ville.syrjala at linux.intel.com>
---
 drivers/gpu/drm/i915/intel_pm.c | 195 +++++++++++++++++---------------
 1 file changed, 103 insertions(+), 92 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 74ac9c98e6d0..8820c59c56e4 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2825,94 +2825,10 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
 	       PIPE_WM_LINETIME_TIME(linetime);
 }
 
-static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
-				  uint16_t wm[8])
+static void ilk_read_wm_latency(struct drm_i915_private *dev_priv,
+				u16 wm[5])
 {
-	if (INTEL_GEN(dev_priv) >= 9) {
-		uint32_t val;
-		int ret, i;
-		int level, num_levels = dev_priv->wm.num_levels;
-
-		/* read the first set of memory latencies[0:3] */
-		val = 0; /* data0 to be programmed to 0 for first set */
-		mutex_lock(&dev_priv->pcu_lock);
-		ret = sandybridge_pcode_read(dev_priv,
-					     GEN9_PCODE_READ_MEM_LATENCY,
-					     &val);
-		mutex_unlock(&dev_priv->pcu_lock);
-
-		if (ret) {
-			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
-			return;
-		}
-
-		wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
-		wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
-				GEN9_MEM_LATENCY_LEVEL_MASK;
-		wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
-				GEN9_MEM_LATENCY_LEVEL_MASK;
-		wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
-				GEN9_MEM_LATENCY_LEVEL_MASK;
-
-		/* read the second set of memory latencies[4:7] */
-		val = 1; /* data0 to be programmed to 1 for second set */
-		mutex_lock(&dev_priv->pcu_lock);
-		ret = sandybridge_pcode_read(dev_priv,
-					     GEN9_PCODE_READ_MEM_LATENCY,
-					     &val);
-		mutex_unlock(&dev_priv->pcu_lock);
-		if (ret) {
-			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
-			return;
-		}
-
-		wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
-		wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
-				GEN9_MEM_LATENCY_LEVEL_MASK;
-		wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
-				GEN9_MEM_LATENCY_LEVEL_MASK;
-		wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
-				GEN9_MEM_LATENCY_LEVEL_MASK;
-
-		/*
-		 * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
-		 * need to be disabled. We make sure to sanitize the values out
-		 * of the punit to satisfy this requirement.
-		 */
-		for (level = 1; level < num_levels; level++) {
-			if (wm[level] == 0) {
-				for (i = level + 1; i < num_levels; i++)
-					wm[i] = 0;
-				break;
-			}
-		}
-
-		/*
-		 * WaWmMemoryReadLatency:skl+,glk
-		 *
-		 * punit doesn't take into account the read latency so we need
-		 * to add 2us to the various latency levels we retrieve from the
-		 * punit when level 0 response data us 0us.
-		 */
-		if (wm[0] == 0) {
-			wm[0] += 2;
-			for (level = 1; level < num_levels; level++) {
-				if (wm[level] == 0)
-					break;
-				wm[level] += 2;
-			}
-		}
-
-		/*
-		 * WA Level-0 adjustment for 16GB DIMMs: SKL+
-		 * If we could not get dimm info enable this WA to prevent from
-		 * any underrun. If not able to get Dimm info assume 16GB dimm
-		 * to avoid any underrun.
-		 */
-		if (dev_priv->dram_info.is_16gb_dimm)
-			wm[0] += 1;
-
-	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 		uint64_t sskpd = I915_READ64(MCH_SSKPD);
 
 		wm[0] = (sskpd >> 56) & 0xFF;
@@ -2929,7 +2845,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
 		wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
 		wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
 		wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
-	} else if (INTEL_GEN(dev_priv) >= 5) {
+	} else if (IS_GEN5(dev_priv)) {
 		uint32_t mltr = I915_READ(MLTR_ILK);
 
 		/* ILK primary LP0 latency is 700 ns */
@@ -3007,8 +2923,7 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
 	else
 		dev_priv->wm.num_levels = 3;
 
-	intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
-
+	ilk_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
 	ilk_fixup_wm_latency_units(dev_priv, dev_priv->wm.pri_latency);
 
 	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
@@ -3027,6 +2942,68 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
 		snb_wm_latency_quirk(dev_priv);
 }
 
+static void skl_read_wm_latency(struct drm_i915_private *dev_priv,
+				u16 wm[8])
+{
+	int level, num_levels = dev_priv->wm.num_levels;
+	int ret, i;
+	u32 val;
+
+	/* read the first set of memory latencies[0:3] */
+	val = 0; /* data0 to be programmed to 0 for first set */
+	mutex_lock(&dev_priv->pcu_lock);
+	ret = sandybridge_pcode_read(dev_priv,
+				     GEN9_PCODE_READ_MEM_LATENCY,
+				     &val);
+	mutex_unlock(&dev_priv->pcu_lock);
+
+	if (ret) {
+		DRM_ERROR("SKL Mailbox read error = %d\n", ret);
+		return;
+	}
+
+	wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
+	wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
+		GEN9_MEM_LATENCY_LEVEL_MASK;
+	wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
+		GEN9_MEM_LATENCY_LEVEL_MASK;
+	wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
+		GEN9_MEM_LATENCY_LEVEL_MASK;
+
+	/* read the second set of memory latencies[4:7] */
+	val = 1; /* data0 to be programmed to 1 for second set */
+	mutex_lock(&dev_priv->pcu_lock);
+	ret = sandybridge_pcode_read(dev_priv,
+				     GEN9_PCODE_READ_MEM_LATENCY,
+				     &val);
+	mutex_unlock(&dev_priv->pcu_lock);
+	if (ret) {
+		DRM_ERROR("SKL Mailbox read error = %d\n", ret);
+		return;
+	}
+
+	wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
+	wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
+		GEN9_MEM_LATENCY_LEVEL_MASK;
+	wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
+		GEN9_MEM_LATENCY_LEVEL_MASK;
+	wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
+		GEN9_MEM_LATENCY_LEVEL_MASK;
+
+	/*
+	 * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
+	 * need to be disabled. We make sure to sanitize the values out
+	 * of the punit to satisfy this requirement.
+	 */
+	for (level = 1; level < num_levels; level++) {
+		if (wm[level] == 0) {
+			for (i = level + 1; i < num_levels; i++)
+				wm[i] = 0;
+			break;
+		}
+	}
+}
+
 static void skl_fixup_wm_latency_units(struct drm_i915_private *dev_priv,
 				       u16 wm[8])
 {
@@ -3037,14 +3014,48 @@ static void skl_fixup_wm_latency_units(struct drm_i915_private *dev_priv,
 		wm[level] *= 10;
 }
 
+static void skl_wm_latency_wa(struct drm_i915_private *dev_priv,
+			      u16 wm[8])
+{
+	/*
+	 * WaWmMemoryReadLatency:skl+,glk
+	 *
+	 * punit doesn't take into account the read latency so we need
+	 * to add 2us to the various latency levels we retrieve from the
+	 * punit when level 0 response data is 0us.
+	 */
+	if (wm[0] == 0) {
+		int level, num_levels = dev_priv->wm.num_levels;
+
+		wm[0] += 2 * 10;
+
+		for (level = 1; level < num_levels; level++) {
+			if (wm[level] == 0)
+				break;
+
+			wm[level] += 2 * 10;
+		}
+	}
+
+	/*
+	 * WA Level-0 adjustment for 16GB DIMMs: SKL+
+	 * Apply this WA to prevent underruns with 16GB DIMMs. If the
+	 * dimm info could not be determined, assume 16GB dimms to be
+	 * on the safe side and apply the WA anyway.
+	 */
+	if (dev_priv->dram_info.is_16gb_dimm)
+		wm[0] += 1 * 10;
+}
+
 static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
 {
 	dev_priv->wm.num_levels = 8;
 
-	intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
-
+	skl_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
 	skl_fixup_wm_latency_units(dev_priv, dev_priv->wm.pri_latency);
 
+	skl_wm_latency_wa(dev_priv, dev_priv->wm.pri_latency);
+
 	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
 	       sizeof(dev_priv->wm.pri_latency));
 	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
-- 
2.18.1


