[Intel-gfx] [PATCH 2/2] drm/i915: Add Baytrail PSR Support.
Rodrigo Vivi
rodrigo.vivi at gmail.com
Wed Feb 5 20:04:31 CET 2014
This patch adds PSR support to Baytrail.
Baytrail cannot easily detect screen updates and force PSR exit on its own,
so we inactivate PSR on {busy_ioctl, set_domain, sw_finish and mark_busy}
and enable it back on the next display mark_idle.
v2: Also inactivate PSR on cursor update.
v3: Inactivate PSR on mark_busy, set_domain and sw_finish_ioctl, and
early on page flip; also avoid initializing the inactive/active flag
more than once.
v4: Fix indentation issues.
v5: Rebase and add Baytrail per-pipe support, although leaving PIPE_B
support disabled for now since it isn't working properly yet.
v6: Remove forgotten comment and useless clkgating definition.
v7: Remove inactivate from set_domain. Chris warned this was semantically
wrong.
v8: Accept Ville's suggestions: Use register's names matching spec and
warn if transition took longer than it should.
v9: New version with delayed work to get PSR back. Disable it on
set_domain but do not reschedule it back until the next finish_page_flip.
Cc: Chris Wilson <chris at chris-wilson.co.uk>
Cc: Ville Syrjälä <ville.syrjala at linux.intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi at gmail.com>
---
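For reviewers, a rough self-contained sketch of the inactivate/re-arm flow this
patch introduces (conceptual only: the struct and helper names below are
illustrative stand-ins, not the actual i915 symbols; only the workqueue calls
are the real kernel API):

#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct psr_state {
	bool active;
	struct delayed_work work;	/* re-enables PSR after 5 s of quiet */
};

static void psr_hw_exit(struct psr_state *psr)
{
	/* stand-in for the VLV_PSRCTL reset and sink DPMS_ON write */
	psr->active = false;
}

static void psr_rearm_worker(struct work_struct *work)
{
	struct psr_state *psr =
		container_of(work, struct psr_state, work.work);

	/* stand-in for intel_edp_psr_update(): re-check conditions, enable */
	psr->active = true;
}

/*
 * busy_ioctl/sw_finish/mark_busy/cursor/page flip call this with
 * schedule_back = true; set_domain, dpms-off and resume call it with
 * schedule_back = false and rely on finish_page_flip to schedule the
 * work again later.
 */
static void psr_inactivate(struct psr_state *psr, bool schedule_back)
{
	/* even when already inactive, a pending re-arm must be dropped */
	if (!schedule_back)
		cancel_delayed_work_sync(&psr->work);

	if (!psr->active)
		return;

	cancel_delayed_work_sync(&psr->work);
	psr_hw_exit(psr);

	if (schedule_back)
		schedule_delayed_work(&psr->work, msecs_to_jiffies(5000));
}

At init time INIT_DELAYED_WORK(&psr->work, psr_rearm_worker) wires the worker
up, mirroring the INIT_DELAYED_WORK() call added in intel_dp_init_connector().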
drivers/gpu/drm/i915/i915_debugfs.c | 36 ++++-
drivers/gpu/drm/i915/i915_drv.h | 5 +-
drivers/gpu/drm/i915/i915_gem.c | 12 ++
drivers/gpu/drm/i915/i915_reg.h | 37 +++++
drivers/gpu/drm/i915/i915_suspend.c | 2 +-
drivers/gpu/drm/i915/intel_display.c | 18 ++-
drivers/gpu/drm/i915/intel_dp.c | 256 ++++++++++++++++++++++++++++++-----
drivers/gpu/drm/i915/intel_drv.h | 1 +
8 files changed, 323 insertions(+), 44 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index bc8707f..2949c48 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1900,6 +1900,8 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 psrperf = 0;
+ u32 statA = 0;
+ u32 statB = 0;
bool enabled = false;
intel_runtime_pm_get(dev_priv);
@@ -1907,14 +1909,38 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
- enabled = HAS_PSR(dev) &&
- I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
- seq_printf(m, "Enabled: %s\n", yesno(enabled));
+ if (HAS_PSR(dev)) {
+ if (IS_VALLEYVIEW(dev)) {
+ statA = I915_READ(VLV_PSRSTAT(PIPE_A)) &
+ VLV_EDP_PSR_CURR_STATE_MASK;
+ statB = I915_READ(VLV_PSRSTAT(PIPE_B)) &
+ VLV_EDP_PSR_CURR_STATE_MASK;
+ enabled = ((statA == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
+ (statA == VLV_EDP_PSR_ACTIVE_SF_UPDATE) ||
+ (statB == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
+ (statB == VLV_EDP_PSR_ACTIVE_SF_UPDATE));
+ } else
+ enabled = I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
+ }
+ seq_printf(m, "Enabled: %s", yesno(enabled));
- if (HAS_PSR(dev))
+ if (IS_VALLEYVIEW(dev)) {
+ if ((statA == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
+ (statA == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
+ seq_puts(m, " pipe A");
+ if ((statB == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
+ (statB == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
+ seq_puts(m, " pipe B");
+ }
+
+ seq_puts(m, "\n");
+
+ /* VLV PSR has no performance counter */
+ if (HAS_PSR(dev) && !IS_VALLEYVIEW(dev)) {
psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
EDP_PSR_PERF_CNT_MASK;
- seq_printf(m, "Performance_Counter: %u\n", psrperf);
+ seq_printf(m, "Performance_Counter: %u\n", psrperf);
+ }
intel_runtime_pm_put(dev_priv);
return 0;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 21470be..87c346a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -747,7 +747,9 @@ struct i915_psr {
bool sink_support;
bool source_ok;
bool setup_done;
+ bool active;
struct mutex lock;
+ struct delayed_work work;
};
enum intel_pch {
@@ -1867,7 +1869,8 @@ struct drm_i915_file_private {
#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
-#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
+#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \
+ IS_VALLEYVIEW(dev))
#define HAS_PC8(dev) (IS_HASWELL(dev)) /* XXX HSW:ULX */
#define HAS_RUNTIME_PM(dev) (IS_HASWELL(dev))
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 39770f7..5ef1380 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1256,6 +1256,14 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
goto unlock;
}
+ /* Here we don't reschedule the work to get PSR back because userspace
+ * can set the domain and take as much time as it wants to write to it.
+ * We only reschedule it on the next finish_page_flip. There is a risk
+ * of PSR staying inactive forever depending on how the compositor works,
+ * but this is the safest way to avoid losing screen updates.
+ */
+ intel_edp_psr_inactivate(dev, false);
+
/* Try to flush the object off the GPU without holding the lock.
* We will repeat the flush holding the lock in the normal manner
* to catch cases where we are gazumped.
@@ -1299,6 +1307,8 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
+ intel_edp_psr_inactivate(dev, true);
+
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (&obj->base == NULL) {
ret = -ENOENT;
@@ -4059,6 +4069,8 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
+ intel_edp_psr_inactivate(dev, true);
+
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (&obj->base == NULL) {
ret = -ENOENT;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index cbbaf26..a5815d0 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1968,6 +1968,43 @@
#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
+/* VLV eDP PSR registers */
+#define _PSRCTLA (VLV_DISPLAY_BASE + 0x60090)
+#define _PSRCTLB (VLV_DISPLAY_BASE + 0x61090)
+#define VLV_EDP_PSR_ENABLE (1<<0)
+#define VLV_EDP_PSR_RESET (1<<1)
+#define VLV_EDP_PSR_MODE_MASK (7<<2)
+#define VLV_EDP_PSR_MODE_HW_TIMER (1<<3)
+#define VLV_EDP_PSR_MODE_SW_TIMER (1<<2)
+#define VLV_EDP_PSR_SINGLE_FRAME_UPDATE (1<<7)
+#define VLV_EDP_PSR_ACTIVE_ENTRY (1<<8)
+#define VLV_EDP_PSR_SRC_TRANSMITTER_STATE (1<<9)
+#define VLV_EDP_PSR_DBL_FRAME (1<<10)
+#define VLV_EDP_PSR_FRAME_COUNT_MASK (0xff<<16)
+#define VLV_EDP_PSR_IDLE_FRAME_SHIFT 16
+#define VLV_EDP_PSR_INT_TRANSITION (1<<24)
+#define VLV_PSRCTL(pipe) _PIPE(pipe, _PSRCTLA, _PSRCTLB)
+
+#define _VSCSDPA (VLV_DISPLAY_BASE + 0x600a0)
+#define _VSCSDPB (VLV_DISPLAY_BASE + 0x610a0)
+#define VLV_EDP_PSR_SDP_FREQ_MASK (3<<30)
+#define VLV_EDP_PSR_SDP_FREQ_ONCE (1<<31)
+#define VLV_EDP_PSR_SDP_FREQ_EVFRAME (1<<30)
+#define VLV_VSCSDP(pipe) _PIPE(pipe, _VSCSDPA, _VSCSDPB)
+
+#define _PSRSTATA (VLV_DISPLAY_BASE + 0x60094)
+#define _PSRSTATB (VLV_DISPLAY_BASE + 0x61094)
+#define VLV_EDP_PSR_LAST_STATE_MASK (7<<3)
+#define VLV_EDP_PSR_CURR_STATE_MASK 7
+#define VLV_EDP_PSR_DISABLED (0<<0)
+#define VLV_EDP_PSR_INACTIVE (1<<0)
+#define VLV_EDP_PSR_IN_TRANS_TO_ACTIVE (2<<0)
+#define VLV_EDP_PSR_ACTIVE_NORFB_UP (3<<0)
+#define VLV_EDP_PSR_ACTIVE_SF_UPDATE (4<<0)
+#define VLV_EDP_PSR_EXIT (5<<0)
+#define VLV_EDP_PSR_IN_TRANS (1<<7)
+#define VLV_PSRSTAT(pipe) _PIPE(pipe, _PSRSTATA, _PSRSTATB)
+
/* HSW+ eDP PSR registers */
#define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
#define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index ffcba21..6b31020 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -289,8 +289,8 @@ static void i915_restore_display(struct drm_device *dev)
}
/* Force a full PSR setup on resume */
+ intel_edp_psr_inactivate(dev, false);
dev_priv->psr.setup_done = false;
- intel_edp_psr_update(dev);
/* only restore FBC info on the platform that supports FBC*/
intel_disable_fbc(dev);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 1a9aa19..92d6df4 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4451,9 +4451,13 @@ static void intel_connector_check_state(struct intel_connector *connector)
* consider. */
void intel_connector_dpms(struct drm_connector *connector, int mode)
{
+ struct drm_device *dev = connector->dev;
+
/* All the simple cases only support two dpms states. */
- if (mode != DRM_MODE_DPMS_ON)
+ if (mode != DRM_MODE_DPMS_ON) {
mode = DRM_MODE_DPMS_OFF;
+ intel_edp_psr_inactivate(dev, false);
+ }
if (mode == connector->dpms)
return;
@@ -7501,6 +7505,8 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
u32 base = 0, pos = 0;
bool visible;
+ intel_edp_psr_inactivate(dev, true);
+
if (on)
base = intel_crtc->cursor_addr;
@@ -8230,12 +8236,15 @@ void intel_mark_idle(struct drm_device *dev)
gen6_rps_idle(dev->dev_private);
}
+
void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring)
{
struct drm_device *dev = obj->base.dev;
struct drm_crtc *crtc;
+ intel_edp_psr_inactivate(dev, true);
+
if (!i915.powersave)
return;
@@ -8336,6 +8345,10 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
queue_work(dev_priv->wq, &work->work);
trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
+
+ /* Get PSR back after set_domain inactivated it without rescheduling
+ * it back. */
+ schedule_delayed_work(&dev_priv->psr.work, msecs_to_jiffies(5000));
}
void intel_finish_page_flip(struct drm_device *dev, int pipe)
@@ -8688,6 +8701,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (work == NULL)
return -ENOMEM;
+ /* Inactivate PSR early in page flip */
+ intel_edp_psr_inactivate(dev, true);
+
work->event = event;
work->crtc = crtc;
work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 80054bb..576c204 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1578,21 +1578,35 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
}
}
-static bool is_edp_psr(struct drm_device *dev)
+static bool is_edp_psr(struct intel_dp *intel_dp)
+{
+ return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
+}
+
+static bool vlv_edp_is_psr_enabled_on_pipe(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t val;
- return dev_priv->psr.sink_support;
+ val = I915_READ(VLV_PSRSTAT(pipe)) &
+ VLV_EDP_PSR_CURR_STATE_MASK;
+ return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
+ (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
}
static bool intel_edp_is_psr_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!HAS_PSR(dev))
- return false;
+ if (HAS_PSR(dev)) {
+ if (IS_VALLEYVIEW(dev))
+ return vlv_edp_is_psr_enabled_on_pipe(dev, PIPE_A) ||
+ vlv_edp_is_psr_enabled_on_pipe(dev, PIPE_B);
+ else
+ return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
+ }
- return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
+ return false;
}
static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
@@ -1624,32 +1638,60 @@ static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
POSTING_READ(ctl_reg);
}
+static void intel_edp_psr_work(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), psr.work.work);
+ struct drm_device *dev = dev_priv->dev;
+
+ intel_edp_psr_update(dev);
+}
+
static void intel_edp_psr_setup(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct edp_vsc_psr psr_vsc;
+ uint32_t val;
if (dev_priv->psr.setup_done)
return;
- mutex_init(&dev_priv->psr.lock);
-
- /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
- memset(&psr_vsc, 0, sizeof(psr_vsc));
- psr_vsc.sdp_header.HB0 = 0;
- psr_vsc.sdp_header.HB1 = 0x7;
- psr_vsc.sdp_header.HB2 = 0x2;
- psr_vsc.sdp_header.HB3 = 0x8;
- intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
-
- /* Avoid continuous PSR exit by masking memup and hpd */
- I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
- EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
+ if (IS_VALLEYVIEW(dev)) {
+ val = I915_READ(VLV_VSCSDP(PIPE_A));
+ val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
+ val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
+ I915_WRITE(VLV_VSCSDP(PIPE_A), val);
+
+ val = I915_READ(VLV_VSCSDP(PIPE_B));
+ val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
+ val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
+ I915_WRITE(VLV_VSCSDP(PIPE_B), val);
+ } else {
+ /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
+ memset(&psr_vsc, 0, sizeof(psr_vsc));
+ psr_vsc.sdp_header.HB0 = 0;
+ psr_vsc.sdp_header.HB1 = 0x7;
+ psr_vsc.sdp_header.HB2 = 0x2;
+ psr_vsc.sdp_header.HB3 = 0x8;
+ intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
+
+ /* Avoid continuous PSR exit by masking memup and hpd */
+ I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
+ EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
+ }
dev_priv->psr.setup_done = true;
}
+static void vlv_edp_psr_enable_sink(struct intel_dp *intel_dp)
+{
+ /* Enable PSR in sink */
+ intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
+ DP_PSR_ENABLE);
+}
+
static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
@@ -1680,6 +1722,27 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
}
+static void vlv_edp_psr_enable_source(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(intel_dig_port->base.base.crtc);
+
+ uint32_t idle_frames = 1;
+ uint32_t val = 0;
+
+ val |= VLV_EDP_PSR_ENABLE;
+ val &= ~VLV_EDP_PSR_MODE_MASK;
+
+ val |= VLV_EDP_PSR_MODE_HW_TIMER;
+ val &= ~VLV_EDP_PSR_FRAME_COUNT_MASK;
+ val |= idle_frames << VLV_EDP_PSR_IDLE_FRAME_SHIFT;
+
+ I915_WRITE(VLV_PSRCTL(intel_crtc->pipe), val);
+}
+
static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
@@ -1721,8 +1784,8 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
return false;
}
- if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
- (dig_port->port != PORT_A)) {
+ if (HAS_DDI(dev) && ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
+ (dig_port->port != PORT_A))) {
DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
return false;
}
@@ -1767,23 +1830,45 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
return false;
}
+ /* Baytrail supports per-pipe PSR configuration, however PSR on
+ * PIPE_B isn't working properly. So let's keep it disabled for now. */
+ if (IS_VALLEYVIEW(dev) && intel_crtc->pipe != PIPE_A) {
+ DRM_DEBUG_KMS("PSR on BYT isn't enabled on pipe B.\n");
+ return false;
+ }
+
dev_priv->psr.source_ok = true;
return true;
}
static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(intel_dig_port->base.base.crtc);
- if (!intel_edp_psr_match_conditions(intel_dp) ||
- intel_edp_is_psr_enabled(dev))
- return;
+ if (IS_VALLEYVIEW(dev)) {
+ if (vlv_edp_is_psr_enabled_on_pipe(dev, intel_crtc->pipe))
+ return;
+ } else
+ if (intel_edp_is_psr_enabled(dev))
+ return;
/* Enable PSR on the panel */
- intel_edp_psr_enable_sink(intel_dp);
+ if (IS_VALLEYVIEW(dev))
+ vlv_edp_psr_enable_sink(intel_dp);
+ else
+ intel_edp_psr_enable_sink(intel_dp);
/* Enable PSR on the host */
- intel_edp_psr_enable_source(intel_dp);
+ if (IS_VALLEYVIEW(dev))
+ vlv_edp_psr_enable_source(intel_dp);
+ else
+ intel_edp_psr_enable_source(intel_dp);
+
+ dev_priv->psr.active = true;
}
void intel_edp_psr_enable(struct intel_dp *intel_dp)
@@ -1791,16 +1876,43 @@ void intel_edp_psr_enable(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
+ if (!is_edp_psr(intel_dp))
+ return;
+
/* Setup PSR once */
intel_edp_psr_setup(intel_dp);
mutex_lock(&dev_priv->psr.lock);
- if (intel_edp_psr_match_conditions(intel_dp) &&
- !intel_edp_is_psr_enabled(dev))
+ if (intel_edp_psr_match_conditions(intel_dp))
intel_edp_psr_do_enable(intel_dp);
mutex_unlock(&dev_priv->psr.lock);
}
+void vlv_edp_psr_disable(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(intel_dig_port->base.base.crtc);
+ uint32_t val;
+
+ if (!dev_priv->psr.setup_done)
+ return;
+
+ intel_edp_psr_inactivate(dev, false);
+
+ if (wait_for((I915_READ(VLV_PSRSTAT(intel_crtc->pipe)) &
+ VLV_EDP_PSR_IN_TRANS) == 0, 250))
+ WARN(1, "PSR transition took longer than expected\n");
+
+ val = I915_READ(VLV_PSRCTL(intel_crtc->pipe));
+ val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
+ val &= ~VLV_EDP_PSR_ENABLE;
+ val &= ~VLV_EDP_PSR_MODE_MASK;
+ I915_WRITE(VLV_PSRCTL(intel_crtc->pipe), val);
+}
+
void intel_edp_psr_disable(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
@@ -1823,6 +1935,7 @@ void intel_edp_psr_update(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
struct intel_dp *intel_dp = NULL;
+ struct intel_crtc *intel_crtc;
if (!dev_priv->psr.setup_done)
return;
@@ -1830,26 +1943,93 @@ void intel_edp_psr_update(struct drm_device *dev)
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head)
if (encoder->type == INTEL_OUTPUT_EDP) {
intel_dp = enc_to_intel_dp(&encoder->base);
-
- if (!is_edp_psr(dev))
- return;
+ intel_crtc = to_intel_crtc(encoder->base.crtc);
mutex_lock(&dev_priv->psr.lock);
- if (!intel_edp_psr_match_conditions(intel_dp))
- intel_edp_psr_disable(intel_dp);
- else
+ if (!intel_edp_psr_match_conditions(intel_dp)) {
+ if (IS_VALLEYVIEW(dev))
+ vlv_edp_psr_disable(intel_dp);
+ else
+ intel_edp_psr_disable(intel_dp);
+ } else
if (!intel_edp_is_psr_enabled(dev))
intel_edp_psr_do_enable(intel_dp);
mutex_unlock(&dev_priv->psr.lock);
}
}
+void intel_edp_psr_do_inactivate(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_connector *connector;
+ struct intel_encoder *encoder;
+ struct intel_crtc *intel_crtc;
+ struct intel_dp *intel_dp = NULL;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+
+ if (connector->base.dpms != DRM_MODE_DPMS_ON)
+ continue;
+
+ encoder = to_intel_encoder(connector->base.encoder);
+ if (encoder->type == INTEL_OUTPUT_EDP) {
+
+ intel_dp = enc_to_intel_dp(&encoder->base);
+ intel_crtc = to_intel_crtc(encoder->base.crtc);
+
+ if (!vlv_edp_is_psr_enabled_on_pipe(dev,
+ intel_crtc->pipe))
+ continue;
+
+ dev_priv->psr.active = false;
+
+ I915_WRITE(VLV_PSRCTL(intel_crtc->pipe),
+ VLV_EDP_PSR_RESET);
+ /* WaClearPSRReset:vlv */
+ I915_WRITE(VLV_PSRCTL(intel_crtc->pipe), 0);
+
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ }
+ }
+}
+
+void intel_edp_psr_inactivate(struct drm_device *dev, bool schedule_back)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!IS_VALLEYVIEW(dev))
+ return;
+
+ if (!dev_priv->psr.setup_done)
+ return;
+
+ /* If it was requested not to turn PSR back on, the delayed work must
+ * be canceled even if we are already in the inactive state. */
+ if (!schedule_back)
+ cancel_delayed_work_sync(&dev_priv->psr.work);
+
+ if (!dev_priv->psr.active)
+ return;
+
+ cancel_delayed_work_sync(&dev_priv->psr.work);
+
+ intel_edp_psr_do_inactivate(dev);
+
+ if (schedule_back)
+ schedule_delayed_work(&dev_priv->psr.work,
+ msecs_to_jiffies(5000));
+}
+
static void intel_disable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
struct drm_device *dev = encoder->base.dev;
+ if (IS_VALLEYVIEW(dev))
+ vlv_edp_psr_disable(intel_dp);
+
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
intel_edp_backlight_off(intel_dp);
@@ -1906,6 +2086,7 @@ static void vlv_enable_dp(struct intel_encoder *encoder)
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
intel_edp_backlight_on(intel_dp);
+ intel_edp_psr_enable(intel_dp);
}
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
@@ -3846,8 +4027,11 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
BUG();
}
- if (is_edp(intel_dp))
+ if (is_edp(intel_dp)) {
intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
+ INIT_DELAYED_WORK(&dev_priv->psr.work, intel_edp_psr_work);
+ mutex_init(&dev_priv->psr.lock);
+ }
error = intel_dp_i2c_init(intel_dp, intel_connector, name);
WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n",
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 71c1371..d8103f9 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -748,6 +748,7 @@ void intel_edp_panel_off(struct intel_dp *intel_dp);
void intel_edp_psr_enable(struct intel_dp *intel_dp);
void intel_edp_psr_disable(struct intel_dp *intel_dp);
void intel_edp_psr_update(struct drm_device *dev);
+void intel_edp_psr_inactivate(struct drm_device *dev, bool schedule_back);
/* intel_dsi.c */
--
1.8.1.2