[CI 18/23] drm/i915: Track which pipe is in CxSR mode

Harish Chegondi harish.chegondi at intel.com
Thu Apr 18 21:50:08 UTC 2019


From: Ville Syrjälä <ville.syrjala at linux.intel.com>

Before enabling a second pipe we have to turn off CxSR and wait for
the first pipe to exit CxSR mode. Otherwise the first pipe can still
be using the CxSR FIFO configuration when the second pipe comes up,
and that can lead to underruns.

The simplest way to do this is to just track the "sr_pipe" and
do the vblank wait on it when SR is getting disabled by the
watermark programming done for the second pipe.
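
The flow is roughly the following (simplified sketch; set_memory_cxsr()
here stands in for the platform specific g4x/vlv variants in the diff
below, and locking is omitted):

	/* wm->sr_pipe tracks the single pipe using the CxSR FIFO config */
	if (wm->sr.enable) {
		set_memory_cxsr(dev_priv, false);
		/*
		 * The pipe may keep using the CxSR FIFO configuration
		 * until the next vblank, so wait it out before letting
		 * the second pipe come up.
		 */
		intel_wait_for_vblank(dev_priv, wm->sr_pipe);
	}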

v2: .disable_cxsr() hook

Signed-off-by: Ville Syrjälä <ville.syrjala at linux.intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h      |   5 +-
 drivers/gpu/drm/i915/intel_display.c |  12 +-
 drivers/gpu/drm/i915/intel_pm.c      | 300 +++++++++++++++++++--------
 3 files changed, 223 insertions(+), 94 deletions(-)
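
For reference, the sr_pipe selection below relies on the usual single
bit mask trick (illustrative example only, not part of the diff):

	unsigned int active_pipes = BIT(PIPE_B);	/* only pipe B active */

	if (is_power_of_2(active_pipes))		/* exactly one bit set */
		wm->sr_pipe = ilog2(active_pipes);	/* ilog2(0x2) == 1 == PIPE_B */
	else
		wm->sr_pipe = INVALID_PIPE;		/* zero or multiple pipes */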

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6013a16a7cb4..c91faf914014 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -296,6 +296,7 @@ struct drm_i915_display_funcs {
 	void (*optimize_watermarks)(struct intel_atomic_state *state,
 				    struct intel_crtc_state *cstate);
 	int (*compute_global_watermarks)(struct intel_atomic_state *state);
+	void (*disable_cxsr)(struct intel_crtc *crtc);
 	void (*update_wm)(struct intel_crtc *crtc);
 	int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
 	/* Returns the active state of the crtc, and if the crtc is active,
@@ -1091,6 +1092,7 @@ struct vlv_wm_values {
 	struct g4x_pipe_wm normal[3]; /* for each pipe */
 	struct g4x_sr_wm sr;
 	struct vlv_wm_ddl_values ddl[3]; /* for each pipe */
+	enum pipe sr_pipe;
 	u8 level;
 };
 
@@ -1098,6 +1100,7 @@ struct g4x_wm_values {
 	struct g4x_pipe_wm normal[2]; /* for each pipe */
 	struct g4x_sr_wm sr;
 	struct g4x_sr_wm hpll;
+	enum pipe sr_pipe;
 	bool fbc_en;
 };
 
@@ -3413,8 +3416,6 @@ extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
 extern int intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
 extern void intel_rps_mark_interactive(struct drm_i915_private *i915,
 				       bool interactive);
-extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
-				  bool enable);
 void intel_dsc_enable(struct intel_encoder *encoder,
 		      const struct intel_crtc_state *crtc_state);
 void intel_dsc_disable(const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3bd40a4a6739..8980beb7ada1 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5425,9 +5425,8 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
 	 * event which is after the vblank start event, so we need to have a
 	 * wait-for-vblank between disabling the plane and the pipe.
 	 */
-	if (HAS_GMCH(dev_priv) &&
-	    intel_set_memory_cxsr(dev_priv, false))
-		intel_wait_for_vblank(dev_priv, pipe);
+	if (dev_priv->display.disable_cxsr)
+		dev_priv->display.disable_cxsr(intel_crtc);
 }
 
 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
@@ -5592,9 +5591,10 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
 	 * event which is after the vblank start event, so we need to have a
 	 * wait-for-vblank between disabling the plane and the pipe.
 	 */
-	if (HAS_GMCH(dev_priv) && old_crtc_state->base.active &&
-	    pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
-		intel_wait_for_vblank(dev_priv, crtc->pipe);
+	if (pipe_config->disable_cxsr &&
+	    dev_priv->display.disable_cxsr &&
+	    old_crtc_state->base.active)
+		dev_priv->display.disable_cxsr(crtc);
 
 	/*
 	 * IVB workaround: must disable low power watermarks for at least
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 9716cbf89a27..1ead91ad7ace 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -354,16 +354,49 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
 #define FW_WM(value, plane) \
 	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
 
-static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
+/**
+ * i9xx_set_memory_cxsr - Configure CxSR state
+ * @dev_priv: i915 device
+ * @enable: Allow vs. disallow CxSR
+ *
+ * Allow or disallow the system to enter a special CxSR
+ * (C-state self refresh) state. What typically happens in CxSR mode
+ * is that several display FIFOs may get combined into a single larger
+ * FIFO for a particular plane (so called max FIFO mode) to allow the
+ * system to defer memory fetches longer, and the memory will enter
+ * self refresh.
+ *
+ * Note that enabling CxSR does not guarantee that the system enters
+ * this special mode, nor does it guarantee that the system stays
+ * in that mode once entered. So this just allows/disallows the system
+ * to autonomously utilize the CxSR mode. Other factors such as core
+ * C-states will affect when/if the system actually enters/exits the
+ * CxSR mode.
+ *
+ * Note that on VLV/CHV this actually only controls the max FIFO mode,
+ * and the system is free to enter/exit memory self refresh at any time
+ * even when the use of CxSR has been disallowed.
+ *
+ * While the system is actually in the CxSR/max FIFO mode, some plane
+ * control registers will not get latched on vblank. Thus in order to
+ * guarantee the system will respond to changes in the plane registers
+ * we must always disallow CxSR prior to making changes to those registers.
+ * Unfortunately the system will re-evaluate the CxSR conditions at
+ * frame start which happens after vblank start (which is when the plane
+ * registers would get latched), so we can't proceed with the plane update
+ * during the same frame where we disallowed CxSR.
+ *
+ * Certain platforms also have a deeper HPLL SR mode. Fortunately the
+ * HPLL SR mode depends on CxSR itself, so we don't have to hand hold
+ * the hardware w.r.t. HPLL SR when writing to plane registers.
+ * Disallowing just CxSR is sufficient.
+ */
+static bool i9xx_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
 {
 	bool was_enabled;
 	u32 val;
 
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-		was_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
-		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
-		POSTING_READ(FW_BLC_SELF_VLV);
-	} else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
+	if (IS_I965GM(dev_priv)) {
 		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
 		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
 		POSTING_READ(FW_BLC_SELF);
@@ -397,6 +430,9 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl
 		return false;
 	}
 
+	if (enable == was_enabled)
+		return was_enabled;
+
 	trace_intel_memory_cxsr(dev_priv, was_enabled, enable);
 
 	DRM_DEBUG_KMS("memory self-refresh is %s (was %s)\n",
@@ -406,58 +442,6 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl
 	return was_enabled;
 }
 
-/**
- * intel_set_memory_cxsr - Configure CxSR state
- * @dev_priv: i915 device
- * @enable: Allow vs. disallow CxSR
- *
- * Allow or disallow the system to enter a special CxSR
- * (C-state self refresh) state. What typically happens in CxSR mode
- * is that several display FIFOs may get combined into a single larger
- * FIFO for a particular plane (so called max FIFO mode) to allow the
- * system to defer memory fetches longer, and the memory will enter
- * self refresh.
- *
- * Note that enabling CxSR does not guarantee that the system enter
- * this special mode, nor does it guarantee that the system stays
- * in that mode once entered. So this just allows/disallows the system
- * to autonomously utilize the CxSR mode. Other factors such as core
- * C-states will affect when/if the system actually enters/exits the
- * CxSR mode.
- *
- * Note that on VLV/CHV this actually only controls the max FIFO mode,
- * and the system is free to enter/exit memory self refresh at any time
- * even when the use of CxSR has been disallowed.
- *
- * While the system is actually in the CxSR/max FIFO mode, some plane
- * control registers will not get latched on vblank. Thus in order to
- * guarantee the system will respond to changes in the plane registers
- * we must always disallow CxSR prior to making changes to those registers.
- * Unfortunately the system will re-evaluate the CxSR conditions at
- * frame start which happens after vblank start (which is when the plane
- * registers would get latched), so we can't proceed with the plane update
- * during the same frame where we disallowed CxSR.
- *
- * Certain platforms also have a deeper HPLL SR mode. Fortunately the
- * HPLL SR mode depends on CxSR itself, so we don't have to hand hold
- * the hardware w.r.t. HPLL SR when writing to plane registers.
- * Disallowing just CxSR is sufficient.
- */
-bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
-{
-	bool ret;
-
-	mutex_lock(&dev_priv->wm.wm_mutex);
-	ret = _intel_set_memory_cxsr(dev_priv, enable);
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		dev_priv->wm.vlv.sr.enable = enable;
-	else if (IS_G4X(dev_priv))
-		dev_priv->wm.g4x.sr.enable = enable;
-	mutex_unlock(&dev_priv->wm.wm_mutex);
-
-	return ret;
-}
-
 /*
  * Latency for FIFO fetches is dependent on several factors:
  *   - memory configuration (speed, channels)
@@ -859,7 +843,7 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc)
 					 dev_priv->mem_freq);
 	if (!latency) {
 		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
-		intel_set_memory_cxsr(dev_priv, false);
+		i9xx_set_memory_cxsr(dev_priv, false);
 		return;
 	}
 
@@ -910,12 +894,21 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc)
 		I915_WRITE(DSPFW3, reg);
 		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
 
-		intel_set_memory_cxsr(dev_priv, true);
+		i9xx_set_memory_cxsr(dev_priv, true);
 	} else {
-		intel_set_memory_cxsr(dev_priv, false);
+		i9xx_set_memory_cxsr(dev_priv, false);
 	}
 }
 
+static void i9xx_disable_cxsr(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum pipe sr_pipe = crtc->pipe;
+
+	if (i9xx_set_memory_cxsr(dev_priv, false))
+		intel_wait_for_vblank(dev_priv, sr_pipe);
+}
+
 /*
  * Documentation says:
  * "If the line size is small, the TLB fetches can get in the way of the
@@ -1492,11 +1485,58 @@ static int g4x_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
 	return 0;
 }
 
+static void g4x_set_memory_cxsr(struct drm_i915_private *dev_priv,
+				bool enable)
+{
+	struct g4x_wm_values *wm = &dev_priv->wm.g4x;
+
+	if (enable == wm->sr.enable)
+		return;
+
+	I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
+	POSTING_READ(FW_BLC_SELF);
+
+	trace_intel_memory_cxsr(dev_priv, wm->sr.enable, enable);
+
+	DRM_DEBUG_KMS("memory self-refresh is %s (was %s)\n",
+		      enableddisabled(enable),
+		      enableddisabled(wm->sr.enable));
+
+	wm->sr.enable = enable;
+}
+
+static void _g4x_disable_cxsr(struct drm_i915_private *dev_priv)
+{
+	struct g4x_wm_values *wm = &dev_priv->wm.g4x;
+	bool was_enabled = wm->sr.enable;
+
+	g4x_set_memory_cxsr(dev_priv, false);
+
+	if (was_enabled) {
+		WARN_ON(wm->sr_pipe == INVALID_PIPE);
+		intel_wait_for_vblank(dev_priv, wm->sr_pipe);
+	}
+}
+
+static void g4x_disable_cxsr(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct g4x_wm_state *active = &crtc->wm.active.g4x;
+
+	mutex_lock(&dev_priv->wm.wm_mutex);
+
+	active->sr.enable = false;
+
+	_g4x_disable_cxsr(dev_priv);
+
+	mutex_unlock(&dev_priv->wm.wm_mutex);
+}
+
 static void g4x_merge_wm(struct drm_i915_private *dev_priv,
 			 struct g4x_wm_values *wm)
 {
 	struct intel_crtc *crtc;
-	int num_active_crtcs = 0;
+	unsigned int active_pipes = 0;
 
 	wm->sr.enable = true;
 	wm->hpll.enable = true;
@@ -1515,13 +1555,16 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv,
 		if (!active->fbc_en)
 			wm->fbc_en = false;
 
-		num_active_crtcs++;
+		active_pipes |= BIT(crtc->pipe);
 	}
 
-	if (num_active_crtcs != 1) {
+	if (!is_power_of_2(active_pipes)) {
 		wm->sr.enable = false;
 		wm->hpll.enable = false;
 		wm->fbc_en = false;
+		wm->sr_pipe = INVALID_PIPE;
+	} else {
+		wm->sr_pipe = ilog2(active_pipes);
 	}
 
 	for_each_intel_crtc(&dev_priv->drm, crtc) {
@@ -1529,9 +1572,9 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv,
 		enum pipe pipe = crtc->pipe;
 
 		wm->normal[pipe] = active->normal;
-		if (crtc->active && wm->sr.enable)
+		if (pipe == wm->sr_pipe && wm->sr.enable)
 			wm->sr = active->sr;
-		if (crtc->active && wm->hpll.enable)
+		if (pipe == wm->sr_pipe && wm->hpll.enable)
 			wm->hpll = active->hpll;
 	}
 }
@@ -1546,13 +1589,17 @@ static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
 	if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
 		return;
 
-	if (is_disabling(old_wm->sr.enable, new_wm.sr.enable, true))
-		_intel_set_memory_cxsr(dev_priv, false);
+	if (is_disabling(old_wm->sr.enable, new_wm.sr.enable, true)) {
+		g4x_set_memory_cxsr(dev_priv, false);
+		WARN_ON(old_wm->sr_pipe == INVALID_PIPE);
+	}
 
 	g4x_write_wm_values(dev_priv, &new_wm);
 
-	if (is_enabling(old_wm->sr.enable, new_wm.sr.enable, true))
-		_intel_set_memory_cxsr(dev_priv, true);
+	if (is_enabling(old_wm->sr.enable, new_wm.sr.enable, true)) {
+		g4x_set_memory_cxsr(dev_priv, true);
+		WARN_ON(new_wm.sr_pipe == INVALID_PIPE);
+	}
 
 	*old_wm = new_wm;
 }
@@ -2120,11 +2167,60 @@ static int vlv_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
 	return 0;
 }
 
+static void vlv_set_memory_cxsr(struct drm_i915_private *dev_priv,
+				bool enable)
+{
+	struct vlv_wm_values *wm = &dev_priv->wm.vlv;
+
+	if (enable == wm->sr.enable)
+		return;
+
+	I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
+	POSTING_READ(FW_BLC_SELF_VLV);
+
+	trace_intel_memory_cxsr(dev_priv, wm->sr.enable, enable);
+
+	DRM_DEBUG_KMS("memory self-refresh is %s (was %s)\n",
+		      enableddisabled(enable),
+		      enableddisabled(wm->sr.enable));
+
+	wm->sr.enable = enable;
+}
+
+static void _vlv_disable_cxsr(struct drm_i915_private *dev_priv)
+{
+	struct vlv_wm_values *wm = &dev_priv->wm.vlv;
+	bool was_enabled = wm->sr.enable;
+
+	vlv_set_memory_cxsr(dev_priv, false);
+
+	if (was_enabled) {
+		WARN_ON(wm->sr_pipe == INVALID_PIPE);
+		intel_wait_for_vblank(dev_priv, wm->sr_pipe);
+	}
+}
+
+static void vlv_disable_cxsr(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct vlv_wm_state *active = &crtc->wm.active.vlv;
+	int level, num_levels = intel_wm_num_levels(dev_priv);
+
+	mutex_lock(&dev_priv->wm.wm_mutex);
+
+	for (level = 0; level < num_levels; level++)
+		active->sr[level].enable = false;
+
+	_vlv_disable_cxsr(dev_priv);
+
+	mutex_unlock(&dev_priv->wm.wm_mutex);
+}
+
 static void vlv_merge_wm(struct drm_i915_private *dev_priv,
 			 struct vlv_wm_values *wm)
 {
 	struct intel_crtc *crtc;
-	int num_active_crtcs = 0;
+	unsigned int active_pipes = 0;
 
 	wm->level = dev_priv->wm.max_level;
 	wm->sr.enable = true;
@@ -2138,22 +2234,26 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv,
 		if (!active->sr[VLV_WM_LEVEL_PM2].enable)
 			wm->sr.enable = false;
 
-		num_active_crtcs++;
+		active_pipes |= BIT(crtc->pipe);
+
 		wm->level = min_t(int, wm->level, active->num_levels - 1);
 	}
 
-	if (num_active_crtcs != 1)
+	if (!is_power_of_2(active_pipes)) {
 		wm->sr.enable = false;
-
-	if (num_active_crtcs > 1)
-		wm->level = VLV_WM_LEVEL_PM2;
+		wm->sr_pipe = INVALID_PIPE;
+		if (active_pipes)
+			wm->level = VLV_WM_LEVEL_PM2;
+	} else {
+		wm->sr_pipe = ilog2(active_pipes);
+	}
 
 	for_each_intel_crtc(&dev_priv->drm, crtc) {
 		const struct vlv_wm_state *active = &crtc->wm.active.vlv;
 		enum pipe pipe = crtc->pipe;
 
 		wm->normal[pipe] = active->normal[wm->level];
-		if (crtc->active && wm->sr.enable)
+		if (pipe == wm->sr_pipe && wm->sr.enable)
 			wm->sr = active->sr[wm->level];
 
 		wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
@@ -2179,13 +2279,17 @@ static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
 	if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
 		chv_set_memory_pm5(dev_priv, false);
 
-	if (is_disabling(old_wm->sr.enable, new_wm.sr.enable, true))
-		_intel_set_memory_cxsr(dev_priv, false);
+	if (is_disabling(old_wm->sr.enable, new_wm.sr.enable, true)) {
+		vlv_set_memory_cxsr(dev_priv, false);
+		WARN_ON(old_wm->sr_pipe == INVALID_PIPE);
+	}
 
 	vlv_write_wm_values(dev_priv, &new_wm);
 
-	if (is_enabling(old_wm->sr.enable, new_wm.sr.enable, true))
-		_intel_set_memory_cxsr(dev_priv, true);
+	if (is_enabling(old_wm->sr.enable, new_wm.sr.enable, true)) {
+		vlv_set_memory_cxsr(dev_priv, true);
+		WARN_ON(new_wm.sr_pipe == INVALID_PIPE);
+	}
 
 	if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
 		chv_set_memory_pm5(dev_priv, true);
@@ -2274,7 +2378,7 @@ static void i965_update_wm(struct intel_crtc *unused_crtc)
 	} else {
 		cxsr_enabled = false;
 		/* Turn off self refresh if both pipes are enabled */
-		intel_set_memory_cxsr(dev_priv, false);
+		i9xx_set_memory_cxsr(dev_priv, false);
 	}
 
 	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
@@ -2291,7 +2395,7 @@ static void i965_update_wm(struct intel_crtc *unused_crtc)
 	I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));
 
 	if (cxsr_enabled)
-		intel_set_memory_cxsr(dev_priv, true);
+		i9xx_set_memory_cxsr(dev_priv, true);
 }
 
 #undef FW_WM
@@ -2386,7 +2490,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
 	cwm = 2;
 
 	/* Play safe and disable self-refresh before adjusting watermarks. */
-	intel_set_memory_cxsr(dev_priv, false);
+	i9xx_set_memory_cxsr(dev_priv, false);
 
 	/* Calc sr entries for one plane configs */
 	if (HAS_FW_BLC(dev_priv) && enabled) {
@@ -2436,7 +2540,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
 	I915_WRITE(FW_BLC2, fwater_hi);
 
 	if (enabled)
-		intel_set_memory_cxsr(dev_priv, true);
+		i9xx_set_memory_cxsr(dev_priv, true);
 }
 
 static void i845_update_wm(struct intel_crtc *unused_crtc)
@@ -6026,6 +6130,7 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
 void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
 {
 	struct g4x_wm_values *wm = &dev_priv->wm.g4x;
+	unsigned int active_pipes = 0;
 	struct intel_crtc *crtc;
 
 	g4x_read_wm_values(dev_priv, wm);
@@ -6039,6 +6144,9 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
 		enum plane_id plane_id;
 		int level, max_level;
 
+		if (crtc_state->base.active)
+			active_pipes |= BIT(pipe);
+
 		active->sr.enable = wm->sr.enable;
 		active->hpll.enable = wm->hpll.enable;
 		active->fbc_en = wm->fbc_en;
@@ -6102,6 +6210,11 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
 			      wm->normal[pipe].plane[PLANE_SPRITE0]);
 	}
 
+	if (wm->sr.enable) {
+		WARN_ON(!is_power_of_2(active_pipes));
+		wm->sr_pipe = ilog2(active_pipes);
+	}
+
 	DRM_DEBUG_KMS("Initial SR watermarks: enable=%s, plane=%d, cursor=%d fbc=%s/%d\n",
 		      yesno(wm->sr.enable), wm->sr.plane,
 		      wm->sr.cursor, yesno(wm->fbc_en), wm->sr.fbc);
@@ -6152,6 +6265,7 @@ void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
 void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
 {
 	struct vlv_wm_values *wm = &dev_priv->wm.vlv;
+	unsigned int active_pipes = 0;
 	struct intel_crtc *crtc;
 	u32 val;
 
@@ -6203,6 +6317,9 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
 		enum plane_id plane_id;
 		int level;
 
+		if (crtc_state->base.active)
+			active_pipes |= BIT(pipe);
+
 		vlv_get_fifo_size(crtc_state);
 
 		active->num_levels = wm->level + 1;
@@ -6241,6 +6358,11 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
 			      wm->normal[pipe].plane[PLANE_SPRITE1]);
 	}
 
+	if (wm->sr.enable) {
+		WARN_ON(!is_power_of_2(active_pipes));
+		wm->sr_pipe = ilog2(active_pipes);
+	}
+
 	DRM_DEBUG_KMS("Initial SR watermarks: enable=%s, plane=%d, SR cursor=%d, level=%d\n",
 		      yesno(wm->sr.enable), wm->sr.plane, wm->sr.cursor, wm->level);
 }
@@ -9668,12 +9790,14 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
 		dev_priv->display.initial_watermarks = vlv_initial_watermarks;
 		dev_priv->display.optimize_watermarks = vlv_optimize_watermarks;
 		dev_priv->display.atomic_update_watermarks = vlv_atomic_update_fifo;
+		dev_priv->display.disable_cxsr = vlv_disable_cxsr;
 	} else if (IS_G4X(dev_priv)) {
 		g4x_setup_wm_latency(dev_priv);
 		dev_priv->display.compute_pipe_wm = g4x_compute_pipe_wm;
 		dev_priv->display.compute_intermediate_wm = g4x_compute_intermediate_wm;
 		dev_priv->display.initial_watermarks = g4x_initial_watermarks;
 		dev_priv->display.optimize_watermarks = g4x_optimize_watermarks;
+		dev_priv->display.disable_cxsr = g4x_disable_cxsr;
 	} else if (IS_PINEVIEW(dev_priv)) {
 		if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
 					    dev_priv->is_ddr3,
@@ -9685,15 +9809,18 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
 				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
 				 dev_priv->fsb_freq, dev_priv->mem_freq);
 			/* Disable CxSR and never update its watermark again */
-			intel_set_memory_cxsr(dev_priv, false);
+			i9xx_set_memory_cxsr(dev_priv, false);
 			dev_priv->display.update_wm = NULL;
 		} else
 			dev_priv->display.update_wm = pineview_update_wm;
+		dev_priv->display.disable_cxsr = i9xx_disable_cxsr;
 	} else if (IS_GEN(dev_priv, 4)) {
 		dev_priv->display.update_wm = i965_update_wm;
+		dev_priv->display.disable_cxsr = i9xx_disable_cxsr;
 	} else if (IS_GEN(dev_priv, 3)) {
 		dev_priv->display.update_wm = i9xx_update_wm;
 		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
+		dev_priv->display.disable_cxsr = i9xx_disable_cxsr;
 	} else if (IS_GEN(dev_priv, 2)) {
 		if (INTEL_INFO(dev_priv)->num_pipes == 1) {
 			dev_priv->display.update_wm = i845_update_wm;
@@ -9702,6 +9829,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
 			dev_priv->display.update_wm = i9xx_update_wm;
 			dev_priv->display.get_fifo_size = i830_get_fifo_size;
 		}
+		dev_priv->display.disable_cxsr = i9xx_disable_cxsr;
 	} else {
 		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
 	}
-- 
2.20.1


