[Intel-gfx] [PATCH 09/12] drm/i915: Sanitize wm latency values for ilk+
Ville Syrjala
ville.syrjala at linux.intel.com
Wed Oct 10 13:04:51 UTC 2018
From: Ville Syrjälä <ville.syrjala at linux.intel.com>
For skl+ we disable all wm levels from the first one with a decreasing
memory latency value. Let's generalize the same code to work for all
platforms, and let's use it for ilk-bdw as well since those platforms
also read the latency values from a scratch register.
Signed-off-by: Ville Syrjälä <ville.syrjala at linux.intel.com>
---
drivers/gpu/drm/i915/intel_pm.c | 58 ++++++++++++++++++++++++---------
1 file changed, 42 insertions(+), 16 deletions(-)
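For reference, a standalone sketch of the new behaviour (user-space C,
not the kernel code itself; latencies in 0.1 usec units as elsewhere in
i915, function and sample values are illustrative only):

/*
 * Standalone illustration of the sanitization rule added below, plus
 * the ilk_increase_wm_latency() change: the WM1+ minimum bump must not
 * resurrect levels that sanitization disabled.
 */
#include <stdio.h>

static void sanitize_wm_latency(unsigned short *wm, int num_levels)
{
	int level;

	/* No WM0 latency: assume 5 usec and disable all WM1+ levels. */
	if (wm[0] == 0) {
		wm[0] = 50;
		for (level = 1; level < num_levels; level++)
			wm[level] = 0;
		return;
	}

	/* Disable every level from the first decreasing latency onwards. */
	for (level = 1; level < num_levels; level++) {
		if (wm[level] < wm[level - 1]) {
			for (; level < num_levels; level++)
				wm[level] = 0;
			break;
		}
	}
}

static void bump_wm1_latency(unsigned short *wm, int num_levels,
			     unsigned short min)
{
	int level;

	/* WM1+ latencies must be multiples of .5 usec (5 units). */
	min = (min + 4) / 5 * 5;

	for (level = 1; level < num_levels; level++) {
		/* Stop at the first disabled level, keep it disabled. */
		if (wm[level] == 0)
			break;
		if (wm[level] < min)
			wm[level] = min;
	}
}

int main(void)
{
	/* Hypothetical scratch register values: level 3 drops below level 2. */
	unsigned short wm[5] = { 20, 40, 70, 60, 80 };
	int i;

	sanitize_wm_latency(wm, 5);
	bump_wm1_latency(wm, 5, 42);

	/* Prints 20, 45, 70, 0, 0: levels 3+ stay disabled. */
	for (i = 0; i < 5; i++)
		printf("WM%d: %d x 0.1 usec\n", i, wm[i]);

	return 0;
}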
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 8289c6378db3..62334e413220 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2825,6 +2825,37 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
PIPE_WM_LINETIME_TIME(linetime);
}
+static void intel_sanitize_wm_latency(struct drm_i915_private *dev_priv,
+ u16 *wm)
+{
+ int level, num_levels = dev_priv->wm.num_levels;
+
+ /*
+ * If we don't have WM0 latency, assume
+ * 5 usec and disable all WM1+ levels.
+ * 5 usec seems like a safe(ish) fallback value.
+ */
+ if (WARN(wm[0] == 0, "WM0 memory latency value is zero")) {
+ wm[0] = 50;
+
+ for (level = 1; level < num_levels; level++)
+ wm[level] = 0;
+ return;
+ }
+
+ /* Make sure the latencies are non-decreasing */
+ for (level = 1; level < num_levels; level++) {
+ if (wm[level] < wm[level - 1]) {
+ WARN(wm[level] != 0,
+ "Decreasing WM memory latency value(s)");
+
+ for (; level < num_levels; level++)
+ wm[level] = 0;
+ break;
+ }
+ }
+}
+
static void ilk_read_wm_latency(struct drm_i915_private *dev_priv,
u16 wm[5])
{
@@ -2888,8 +2919,11 @@ static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
/* WM1+ latencies must be multiples of .5 usec */
min = roundup(min, 5);
- for (level = 1; level < num_levels; level++)
+ for (level = 1; level < num_levels; level++) {
+ if (wm[level] == 0)
+ break;
wm[level] = max(wm[level], min);
+ }
return true;
}
@@ -2935,6 +2969,10 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
ilk_fixup_spr_cur_wm_latency(dev_priv, dev_priv->wm.spr_latency);
ilk_fixup_spr_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);
+ intel_sanitize_wm_latency(dev_priv, dev_priv->wm.pri_latency);
+ intel_sanitize_wm_latency(dev_priv, dev_priv->wm.spr_latency);
+ intel_sanitize_wm_latency(dev_priv, dev_priv->wm.cur_latency);
+
intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
@@ -2946,8 +2984,7 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
static void skl_read_wm_latency(struct drm_i915_private *dev_priv,
u16 wm[8])
{
- int level, num_levels = dev_priv->wm.num_levels;
- int ret, i;
+ int ret;
u32 val;
/* read the first set of memory latencies[0:3] */
@@ -2990,19 +3027,6 @@ static void skl_read_wm_latency(struct drm_i915_private *dev_priv,
GEN9_MEM_LATENCY_LEVEL_MASK;
wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
GEN9_MEM_LATENCY_LEVEL_MASK;
-
- /*
- * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
- * need to be disabled. We make sure to sanitize the values out
- * of the punit to satisfy this requirement.
- */
- for (level = 1; level < num_levels; level++) {
- if (wm[level] == 0) {
- for (i = level + 1; i < num_levels; i++)
- wm[i] = 0;
- break;
- }
- }
}
static void skl_fixup_wm_latency_units(struct drm_i915_private *dev_priv,
@@ -3058,6 +3082,8 @@ static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
skl_wm_latency_wa(dev_priv, dev_priv->wm.pri_latency);
+ intel_sanitize_wm_latency(dev_priv, dev_priv->wm.pri_latency);
+
memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
sizeof(dev_priv->wm.pri_latency));
memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
--
2.18.1