[PATCH 4/4] drm/i915/display/psr: Trigger flips while continuous full frame fetch is enabled
José Roberto de Souza
jose.souza at intel.com
Sat Apr 2 13:19:11 UTC 2022
Continuous full frame and single full frame still require a "flip" for
HW to actually update the screen; just setting continuous full frame
will not keep the screen updated.
So here change the PSR work to a delayed work, so we can trigger a
flip by writing to CURSURFLIVE at the same frequency as
display updates, keeping the screen updated while the frontbuffer
is in an invalid state.
Cc: Jouni Högander <jouni.hogander at intel.com>
Signed-off-by: José Roberto de Souza <jose.souza at intel.com>
---
.../drm/i915/display/intel_display_types.h | 3 +-
drivers/gpu/drm/i915/display/intel_psr.c | 30 +++++++++++++++----
2 files changed, 26 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index cfd042117b109..c52ac334c2147 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -1518,7 +1518,7 @@ struct intel_psr {
enum pipe pipe;
enum transcoder transcoder;
bool active;
- struct work_struct work;
+ struct delayed_work work;
unsigned int busy_frontbuffer_bits;
bool sink_psr2_support;
bool link_standby;
@@ -1526,6 +1526,7 @@ struct intel_psr {
bool psr2_enabled;
bool psr2_sel_fetch_enabled;
bool psr2_sel_fetch_cff_enabled;
+ u32 frame_period_jiffies;
bool req_psr2_sdp_prior_scanline;
u8 sink_sync_latency;
ktime_t last_entry_attempt;
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 5b1963b7743dc..346fe01df38ec 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -243,7 +243,7 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
val |= EDP_PSR_ERROR(trans_shift);
intel_de_write(dev_priv, imr_reg, val);
- schedule_work(&intel_dp->psr.work);
+ mod_delayed_work(system_wq, &intel_dp->psr.work, 1);
}
}
@@ -1222,6 +1222,8 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
+ val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state));
+ intel_dp->psr.frame_period_jiffies = val;
intel_dp->psr.req_psr2_sdp_prior_scanline =
crtc_state->req_psr2_sdp_prior_scanline;
@@ -1374,7 +1376,7 @@ void intel_psr_disable(struct intel_dp *intel_dp,
intel_psr_disable_locked(intel_dp);
mutex_unlock(&intel_dp->psr.lock);
- cancel_work_sync(&intel_dp->psr.work);
+ cancel_delayed_work_sync(&intel_dp->psr.work);
cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
}
@@ -1408,7 +1410,7 @@ void intel_psr_pause(struct intel_dp *intel_dp)
mutex_unlock(&psr->lock);
- cancel_work_sync(&psr->work);
+ cancel_delayed_work_sync(&psr->work);
cancel_delayed_work_sync(&psr->dc3co_work);
}
@@ -1456,6 +1458,11 @@ static inline u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev
PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
}
+/*
+ * Continuous full frame and single full frame still require a "flip" for HW
+ * to actually update the screen; just setting continuous full frame will not
+ * keep the screen updated.
+ */
static inline u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
{
return IS_ALDERLAKE_P(dev_priv) ?
@@ -2113,7 +2120,8 @@ static void intel_psr_handle_irq(struct intel_dp *intel_dp)
static void intel_psr_work(struct work_struct *work)
{
struct intel_dp *intel_dp =
- container_of(work, typeof(*intel_dp), psr.work);
+ container_of(work, typeof(*intel_dp), psr.work.work);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
mutex_lock(&intel_dp->psr.lock);
@@ -2123,6 +2131,13 @@ static void intel_psr_work(struct work_struct *work)
if (READ_ONCE(intel_dp->psr.irq_aux_error))
intel_psr_handle_irq(intel_dp);
+ if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
+ intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
+ mod_delayed_work(system_wq, &intel_dp->psr.work,
+ intel_dp->psr.frame_period_jiffies);
+ goto unlock;
+ }
+
/*
* We have to make sure PSR is ready for re-enable
* otherwise it keeps disabled until next full enable/disable cycle.
@@ -2161,6 +2176,8 @@ static void _psr_invalidate_handle(struct intel_dp *intel_dp)
intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), val);
intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
+ mod_delayed_work(system_wq, &intel_dp->psr.work,
+ intel_dp->psr.frame_period_jiffies);
} else {
intel_psr_exit(intel_dp);
}
@@ -2254,6 +2271,7 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
val);
intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
+ cancel_delayed_work(&intel_dp->psr.work);
}
} else {
/*
@@ -2266,7 +2284,7 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
psr_force_hw_tracking_exit(intel_dp);
if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
- schedule_work(&intel_dp->psr.work);
+ mod_delayed_work(system_wq, &intel_dp->psr.work, 1);
}
}
@@ -2369,7 +2387,7 @@ void intel_psr_init(struct intel_dp *intel_dp)
/* For new platforms up to TGL let's respect VBT back again */
intel_dp->psr.link_standby = dev_priv->vbt.psr.full_link;
- INIT_WORK(&intel_dp->psr.work, intel_psr_work);
+ INIT_DELAYED_WORK(&intel_dp->psr.work, intel_psr_work);
INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
mutex_init(&intel_dp->psr.lock);
}
--
2.35.1
More information about the Intel-gfx-trybot
mailing list