[Intel-gfx] [PATCH v7 1/2] drm/i915/display: Support PSR Multiple Transcoders
Anshuman Gupta
anshuman.gupta at intel.com
Wed Dec 16 13:26:01 UTC 2020
On 2020-12-16 at 14:47:42 +0200, Gwan-gyeong Mun wrote:
> This is preliminary work for supporting multiple eDP PSR and
> DP Panel Replay: it refactors the singleton PSR implementation into one
> that can support multiple transcoders.
> It moves and renames the i915_psr structure of drm_i915_private to
> intel_dp's intel_psr structure.
> It also changes the PSR interrupt handling routine to support
> multiple transcoders, but it does not change the scenario or timing of
> enabling and disabling PSR. It does not yet support the case of multiple
> pipes with a single transcoder PSR.
>
> v2: Fix indentation and add comments
> v3: Remove Blank line
> v4: Rebased
> v5: Rebased and addressed Anshuman's review comment.
> - Move calling of intel_psr_init() to intel_dp_init_connector()
> v6: Address Anshuman's review comments
> - Remove wrong comments and add comments about the limitation of
> supporting only single-pipe PSR
You missed addressing some comments provided on v5.
Is the debugfs print in drrs_status_per_crtc not required anymore?
Also, please use drm_{dbg,warn} at every place in this patch.
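For example, the intel_psr_irq_handler() hunk below switches a couple of
prints back to the legacy DRM_DEBUG_KMS()/DRM_WARN() macros; the
device-aware variants would look roughly like this (just a sketch,
dev_priv is already derived there via dp_to_i915(intel_dp)):

	drm_dbg_kms(&dev_priv->drm,
		    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
		    transcoder_name(cpu_transcoder));

	drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
		 transcoder_name(cpu_transcoder));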
> v7: Update intel_psr_compute_config() to support multiple transcoder
> PSR on BDW+
Could you please send this as a separate patch? It removes the PORT_A
restriction so that we can support multiple PSR instances.
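Roughly what the intel_psr_compute_config() hunk further down already
does, just carried in its own patch (sketch only):

	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms have an instance of PSR registers per transcoder.
	 */
	if (IS_HASWELL(dev_priv) && dig_port->base.port != PORT_A) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: Port not supported\n");
		return;
	}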
Thanks,
Anshuman Gupta.
>
> Signed-off-by: Gwan-gyeong Mun <gwan-gyeong.mun at intel.com>
> Cc: José Roberto de Souza <jose.souza at intel.com>
> Cc: Juha-Pekka Heikkila <juhapekka.heikkila at gmail.com>
> Cc: Anshuman Gupta <anshuman.gupta at intel.com>
> ---
> drivers/gpu/drm/i915/display/intel_ddi.c | 3 +
> drivers/gpu/drm/i915/display/intel_display.c | 4 -
> .../drm/i915/display/intel_display_debugfs.c | 111 ++--
> .../drm/i915/display/intel_display_types.h | 38 ++
> drivers/gpu/drm/i915/display/intel_dp.c | 23 +-
> drivers/gpu/drm/i915/display/intel_psr.c | 588 +++++++++---------
> drivers/gpu/drm/i915/display/intel_psr.h | 14 +-
> drivers/gpu/drm/i915/display/intel_sprite.c | 6 +-
> drivers/gpu/drm/i915/i915_drv.h | 38 --
> drivers/gpu/drm/i915/i915_irq.c | 49 +-
> 10 files changed, 490 insertions(+), 384 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
> index 6863236df1d0..4b87f72cb9c0 100644
> --- a/drivers/gpu/drm/i915/display/intel_ddi.c
> +++ b/drivers/gpu/drm/i915/display/intel_ddi.c
> @@ -4320,7 +4320,10 @@ static void intel_ddi_update_pipe_dp(struct intel_atomic_state *state,
>
> intel_ddi_set_dp_msa(crtc_state, conn_state);
>
> + //TODO: move PSR related functions into intel_psr_update()
> + intel_psr2_program_trans_man_trk_ctl(intel_dp, crtc_state);
> intel_psr_update(intel_dp, crtc_state, conn_state);
> +
> intel_dp_set_infoframes(encoder, true, crtc_state, conn_state);
> intel_edp_drrs_update(intel_dp, crtc_state);
>
> diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
> index 78452de5e12f..a753647b0bcb 100644
> --- a/drivers/gpu/drm/i915/display/intel_display.c
> +++ b/drivers/gpu/drm/i915/display/intel_display.c
> @@ -15869,8 +15869,6 @@ static void commit_pipe_config(struct intel_atomic_state *state,
>
> if (new_crtc_state->update_pipe)
> intel_pipe_fastset(old_crtc_state, new_crtc_state);
> -
> - intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
> }
>
> if (dev_priv->display.atomic_update_watermarks)
> @@ -17829,8 +17827,6 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
> intel_dvo_init(dev_priv);
> }
>
> - intel_psr_init(dev_priv);
> -
> for_each_intel_encoder(&dev_priv->drm, encoder) {
> encoder->base.possible_crtcs =
> intel_encoder_possible_crtcs(encoder);
> diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
> index cd7e5519ee7d..041053167d7f 100644
> --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
> +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
> @@ -249,18 +249,17 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data)
> "sink internal error",
> };
> struct drm_connector *connector = m->private;
> - struct drm_i915_private *dev_priv = to_i915(connector->dev);
> struct intel_dp *intel_dp =
> intel_attached_dp(to_intel_connector(connector));
> int ret;
>
> - if (!CAN_PSR(dev_priv)) {
> - seq_puts(m, "PSR Unsupported\n");
> + if (connector->status != connector_status_connected)
> return -ENODEV;
> - }
>
> - if (connector->status != connector_status_connected)
> + if (!CAN_PSR(intel_dp)) {
> + seq_puts(m, "PSR Unsupported\n");
> return -ENODEV;
> + }
>
> ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
>
> @@ -280,12 +279,13 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data)
> DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
>
> static void
> -psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
> +psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
> {
> u32 val, status_val;
> const char *status = "unknown";
> + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
>
> - if (dev_priv->psr.psr2_enabled) {
> + if (intel_dp->psr.psr2_enabled) {
> static const char * const live_status[] = {
> "IDLE",
> "CAPTURE",
> @@ -300,7 +300,7 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
> "TG_ON"
> };
> val = intel_de_read(dev_priv,
> - EDP_PSR2_STATUS(dev_priv->psr.transcoder));
> + EDP_PSR2_STATUS(intel_dp->psr.transcoder));
> status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
> EDP_PSR2_STATUS_STATE_SHIFT;
> if (status_val < ARRAY_SIZE(live_status))
> @@ -317,7 +317,7 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
> "SRDENT_ON",
> };
> val = intel_de_read(dev_priv,
> - EDP_PSR_STATUS(dev_priv->psr.transcoder));
> + EDP_PSR_STATUS(intel_dp->psr.transcoder));
> status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
> EDP_PSR_STATUS_STATE_SHIFT;
> if (status_val < ARRAY_SIZE(live_status))
> @@ -327,21 +327,18 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
> seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
> }
>
> -static int i915_edp_psr_status(struct seq_file *m, void *data)
> +static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
> {
> - struct drm_i915_private *dev_priv = node_to_i915(m->private);
> - struct i915_psr *psr = &dev_priv->psr;
> + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
> + struct intel_psr *psr = &intel_dp->psr;
> intel_wakeref_t wakeref;
> const char *status;
> bool enabled;
> u32 val;
>
> - if (!HAS_PSR(dev_priv))
> - return -ENODEV;
> -
> seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
> - if (psr->dp)
> - seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
> + if (psr->sink_support)
> + seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
> seq_puts(m, "\n");
>
> if (!psr->sink_support)
> @@ -365,16 +362,16 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
>
> if (psr->psr2_enabled) {
> val = intel_de_read(dev_priv,
> - EDP_PSR2_CTL(dev_priv->psr.transcoder));
> + EDP_PSR2_CTL(intel_dp->psr.transcoder));
> enabled = val & EDP_PSR2_ENABLE;
> } else {
> val = intel_de_read(dev_priv,
> - EDP_PSR_CTL(dev_priv->psr.transcoder));
> + EDP_PSR_CTL(intel_dp->psr.transcoder));
> enabled = val & EDP_PSR_ENABLE;
> }
> seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
> enableddisabled(enabled), val);
> - psr_source_status(dev_priv, m);
> + psr_source_status(intel_dp, m);
> seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
> psr->busy_frontbuffer_bits);
>
> @@ -383,7 +380,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
> */
> if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
> val = intel_de_read(dev_priv,
> - EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
> + EDP_PSR_PERF_CNT(intel_dp->psr.transcoder));
> val &= EDP_PSR_PERF_CNT_MASK;
> seq_printf(m, "Performance counter: %u\n", val);
> }
> @@ -404,7 +401,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
> */
> for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
> val = intel_de_read(dev_priv,
> - PSR2_SU_STATUS(dev_priv->psr.transcoder, frame));
> + PSR2_SU_STATUS(intel_dp->psr.transcoder, frame));
> su_frames_val[frame / 3] = val;
> }
>
> @@ -430,23 +427,57 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
> return 0;
> }
>
> +static int i915_edp_psr_status(struct seq_file *m, void *data)
> +{
> + struct drm_i915_private *dev_priv = node_to_i915(m->private);
> + struct intel_encoder *encoder;
> + struct intel_dp *intel_dp = NULL;
> +
> + if (!HAS_PSR(dev_priv))
> + return -ENODEV;
> +
> + /* Find the first EDP */
> + for_each_intel_dp(&dev_priv->drm, encoder) {
> + if (encoder->type == INTEL_OUTPUT_EDP) {
> + intel_dp = enc_to_intel_dp(encoder);
> + break;
> + }
> + }
> +
> + if (!intel_dp)
> + return -ENODEV;
> +
> + return intel_psr_status(m, intel_dp);
> +}
> +
> static int
> i915_edp_psr_debug_set(void *data, u64 val)
> {
> struct drm_i915_private *dev_priv = data;
> intel_wakeref_t wakeref;
> - int ret;
> + int ret = -ENODEV;
> + struct intel_encoder *encoder;
>
> - if (!CAN_PSR(dev_priv))
> - return -ENODEV;
> + if (!HAS_PSR(dev_priv))
> + return ret;
>
> - drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
> + for_each_intel_dp(&dev_priv->drm, encoder) {
> + struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
>
> - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
> + if (!CAN_PSR(intel_dp))
> + continue;
>
> - ret = intel_psr_debug_set(dev_priv, val);
> + if (encoder->type == INTEL_OUTPUT_EDP) {
> + drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
>
> - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
> + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
> +
> + // TODO: split to each transcoder's PSR debug state
> + ret = intel_psr_debug_set(intel_dp, val);
> +
> + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
> + }
> + }
>
> return ret;
> }
> @@ -455,12 +486,25 @@ static int
> i915_edp_psr_debug_get(void *data, u64 *val)
> {
> struct drm_i915_private *dev_priv = data;
> + struct intel_encoder *encoder;
>
> - if (!CAN_PSR(dev_priv))
> + if (!HAS_PSR(dev_priv))
> return -ENODEV;
>
> - *val = READ_ONCE(dev_priv->psr.debug);
> - return 0;
> + for_each_intel_dp(&dev_priv->drm, encoder) {
> + struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
> +
> + if (!CAN_PSR(intel_dp))
> + continue;
> +
> + // TODO: split to each transcoder's PSR debug state
> + if (encoder->type == INTEL_OUTPUT_EDP) {
> + *val = READ_ONCE(intel_dp->psr.debug);
> + return 0;
> + }
> + }
> +
> + return -ENODEV;
> }
>
> DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
> @@ -1234,9 +1278,6 @@ static void drrs_status_per_crtc(struct seq_file *m,
> /* disable_drrs() will make drrs->dp NULL */
> if (!drrs->dp) {
> seq_puts(m, "Idleness DRRS: Disabled\n");
> - if (dev_priv->psr.enabled)
> - seq_puts(m,
> - "\tAs PSR is enabled, DRRS is not enabled\n");
> mutex_unlock(&drrs->mutex);
> return;
> }
> diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
> index 5bc5bfbc4551..9899fddf4c99 100644
> --- a/drivers/gpu/drm/i915/display/intel_display_types.h
> +++ b/drivers/gpu/drm/i915/display/intel_display_types.h
> @@ -1339,6 +1339,42 @@ struct intel_dp_compliance {
> u8 test_lane_count;
> };
>
> +struct intel_psr {
> + /* Mutex for PSR state of the transcoder */
> + struct mutex lock;
> +
> +#define I915_PSR_DEBUG_MODE_MASK 0x0f
> +#define I915_PSR_DEBUG_DEFAULT 0x00
> +#define I915_PSR_DEBUG_DISABLE 0x01
> +#define I915_PSR_DEBUG_ENABLE 0x02
> +#define I915_PSR_DEBUG_FORCE_PSR1 0x03
> +#define I915_PSR_DEBUG_IRQ 0x10
> +
> + u32 debug;
> + bool sink_support;
> + bool enabled;
> + enum pipe pipe;
> + enum transcoder transcoder;
> + bool active;
> + struct work_struct work;
> + unsigned int busy_frontbuffer_bits;
> + bool sink_psr2_support;
> + bool link_standby;
> + bool colorimetry_support;
> + bool psr2_enabled;
> + bool psr2_sel_fetch_enabled;
> + u8 sink_sync_latency;
> + ktime_t last_entry_attempt;
> + ktime_t last_exit;
> + bool sink_not_reliable;
> + bool irq_aux_error;
> + u16 su_x_granularity;
> + bool dc3co_enabled;
> + u32 dc3co_exit_delay;
> + struct delayed_work dc3co_work;
> + struct drm_dp_vsc_sdp vsc;
> +};
> +
> struct intel_dp {
> i915_reg_t output_reg;
> u32 DP;
> @@ -1460,6 +1496,8 @@ struct intel_dp {
>
> bool hobl_failed;
> bool hobl_active;
> +
> + struct intel_psr psr;
> };
>
> enum lspcon_vendor {
> diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
> index b2bc0c8c39c7..5730cad7b6ac 100644
> --- a/drivers/gpu/drm/i915/display/intel_dp.c
> +++ b/drivers/gpu/drm/i915/display/intel_dp.c
> @@ -2702,12 +2702,10 @@ void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
> const struct drm_connector_state *conn_state,
> struct drm_dp_vsc_sdp *vsc)
> {
> - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
> -
> vsc->sdp_type = DP_SDP_VSC;
>
> - if (dev_priv->psr.psr2_enabled) {
> - if (dev_priv->psr.colorimetry_support &&
> + if (intel_dp->psr.psr2_enabled) {
> + if (intel_dp->psr.colorimetry_support &&
> intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
> /* [PSR2, +Colorimetry] */
> intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
> @@ -3844,7 +3842,7 @@ bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
> return false;
> }
>
> - if (CAN_PSR(i915) && intel_dp_is_edp(intel_dp)) {
> + if (CAN_PSR(intel_dp) && intel_dp_is_edp(intel_dp)) {
> drm_dbg_kms(&i915->drm, "Forcing full modeset to compute PSR state\n");
> crtc_state->uapi.mode_changed = true;
> return false;
> @@ -8070,6 +8068,17 @@ static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
> drm_kms_helper_hotplug_event(connector->dev);
> }
>
> +static void intel_dp_update_pipe(struct intel_atomic_state *state,
> + struct intel_encoder *encoder,
> + const struct intel_crtc_state *crtc_state,
> + const struct drm_connector_state *conn_state)
> +{
> + struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
> +
> + intel_panel_update_backlight(state, encoder, crtc_state, conn_state);
> + intel_psr2_program_trans_man_trk_ctl(intel_dp, crtc_state);
> +}
> +
> bool
> intel_dp_init_connector(struct intel_digital_port *dig_port,
> struct intel_connector *intel_connector)
> @@ -8184,6 +8193,8 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
> (temp & ~0xf) | 0xd);
> }
>
> + intel_psr_init(intel_dp);
> +
> return true;
>
> fail:
> @@ -8225,7 +8236,7 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
> intel_encoder->get_config = intel_dp_get_config;
> intel_encoder->sync_state = intel_dp_sync_state;
> intel_encoder->initial_fastset_check = intel_dp_initial_fastset_check;
> - intel_encoder->update_pipe = intel_panel_update_backlight;
> + intel_encoder->update_pipe = intel_dp_update_pipe;
> intel_encoder->suspend = intel_dp_encoder_suspend;
> intel_encoder->shutdown = intel_dp_encoder_shutdown;
> if (IS_CHERRYVIEW(dev_priv)) {
> diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
> index d9a395c486d3..eed7754c2f74 100644
> --- a/drivers/gpu/drm/i915/display/intel_psr.c
> +++ b/drivers/gpu/drm/i915/display/intel_psr.c
> @@ -79,11 +79,13 @@
> * use page flips.
> */
>
> -static bool psr_global_enabled(struct drm_i915_private *i915)
> +static bool psr_global_enabled(struct intel_dp *intel_dp)
> {
> - switch (i915->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
> + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
> +
> + switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
> case I915_PSR_DEBUG_DEFAULT:
> - return i915->params.enable_psr;
> + return dev_priv->params.enable_psr;
> case I915_PSR_DEBUG_DISABLE:
> return false;
> default:
> @@ -91,9 +93,9 @@ static bool psr_global_enabled(struct drm_i915_private *i915)
> }
> }
>
> -static bool psr2_global_enabled(struct drm_i915_private *dev_priv)
> +static bool psr2_global_enabled(struct intel_dp *intel_dp)
> {
> - switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
> + switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
> case I915_PSR_DEBUG_DISABLE:
> case I915_PSR_DEBUG_FORCE_PSR1:
> return false;
> @@ -102,11 +104,12 @@ static bool psr2_global_enabled(struct drm_i915_private *dev_priv)
> }
> }
>
> -static void psr_irq_control(struct drm_i915_private *dev_priv)
> +static void psr_irq_control(struct intel_dp *intel_dp)
> {
> enum transcoder trans_shift;
> u32 mask, val;
> i915_reg_t imr_reg;
> + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
>
> /*
> * gen12+ has registers relative to transcoder and one per transcoder
> @@ -115,14 +118,14 @@ static void psr_irq_control(struct drm_i915_private *dev_priv)
> */
> if (INTEL_GEN(dev_priv) >= 12) {
> trans_shift = 0;
> - imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
> + imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
> } else {
> - trans_shift = dev_priv->psr.transcoder;
> + trans_shift = intel_dp->psr.transcoder;
> imr_reg = EDP_PSR_IMR;
> }
>
> mask = EDP_PSR_ERROR(trans_shift);
> - if (dev_priv->psr.debug & I915_PSR_DEBUG_IRQ)
> + if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
> mask |= EDP_PSR_POST_EXIT(trans_shift) |
> EDP_PSR_PRE_ENTRY(trans_shift);
>
> @@ -171,38 +174,37 @@ static void psr_event_print(struct drm_i915_private *i915,
> drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
> }
>
> -void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
> +void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
> {
> - enum transcoder cpu_transcoder = dev_priv->psr.transcoder;
> + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
> + enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
> enum transcoder trans_shift;
> i915_reg_t imr_reg;
> ktime_t time_ns = ktime_get();
>
> if (INTEL_GEN(dev_priv) >= 12) {
> trans_shift = 0;
> - imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
> + imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
> } else {
> - trans_shift = dev_priv->psr.transcoder;
> + trans_shift = intel_dp->psr.transcoder;
> imr_reg = EDP_PSR_IMR;
> }
>
> if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
> - dev_priv->psr.last_entry_attempt = time_ns;
> - drm_dbg_kms(&dev_priv->drm,
> - "[transcoder %s] PSR entry attempt in 2 vblanks\n",
> - transcoder_name(cpu_transcoder));
> + intel_dp->psr.last_entry_attempt = time_ns;
> + DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
> + transcoder_name(cpu_transcoder));
> }
>
> if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
> - dev_priv->psr.last_exit = time_ns;
> - drm_dbg_kms(&dev_priv->drm,
> - "[transcoder %s] PSR exit completed\n",
> - transcoder_name(cpu_transcoder));
> + intel_dp->psr.last_exit = time_ns;
> + DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
> + transcoder_name(cpu_transcoder));
>
> if (INTEL_GEN(dev_priv) >= 9) {
> u32 val = intel_de_read(dev_priv,
> PSR_EVENT(cpu_transcoder));
> - bool psr2_enabled = dev_priv->psr.psr2_enabled;
> + bool psr2_enabled = intel_dp->psr.psr2_enabled;
>
> intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder),
> val);
> @@ -213,10 +215,10 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
> if (psr_iir & EDP_PSR_ERROR(trans_shift)) {
> u32 val;
>
> - drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
> + DRM_WARN("[transcoder %s] PSR aux error\n",
> transcoder_name(cpu_transcoder));
>
> - dev_priv->psr.irq_aux_error = true;
> + intel_dp->psr.irq_aux_error = true;
>
> /*
> * If this interruption is not masked it will keep
> @@ -230,7 +232,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
> val |= EDP_PSR_ERROR(trans_shift);
> intel_de_write(dev_priv, imr_reg, val);
>
> - schedule_work(&dev_priv->psr.work);
> + schedule_work(&intel_dp->psr.work);
> }
> }
>
> @@ -291,12 +293,6 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
> struct drm_i915_private *dev_priv =
> to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
>
> - if (dev_priv->psr.dp) {
> - drm_warn(&dev_priv->drm,
> - "More than one eDP panel found, PSR support should be extended\n");
> - return;
> - }
> -
> drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
> sizeof(intel_dp->psr_dpcd));
>
> @@ -317,12 +313,10 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
> return;
> }
>
> - dev_priv->psr.sink_support = true;
> - dev_priv->psr.sink_sync_latency =
> + intel_dp->psr.sink_support = true;
> + intel_dp->psr.sink_sync_latency =
> intel_dp_get_sink_sync_latency(intel_dp);
>
> - dev_priv->psr.dp = intel_dp;
> -
> if (INTEL_GEN(dev_priv) >= 9 &&
> (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
> bool y_req = intel_dp->psr_dpcd[1] &
> @@ -340,14 +334,14 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
> * Y-coordinate requirement panels we would need to enable
> * GTC first.
> */
> - dev_priv->psr.sink_psr2_support = y_req && alpm;
> + intel_dp->psr.sink_psr2_support = y_req && alpm;
> drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
> - dev_priv->psr.sink_psr2_support ? "" : "not ");
> + intel_dp->psr.sink_psr2_support ? "" : "not ");
>
> - if (dev_priv->psr.sink_psr2_support) {
> - dev_priv->psr.colorimetry_support =
> + if (intel_dp->psr.sink_psr2_support) {
> + intel_dp->psr.colorimetry_support =
> intel_dp_get_colorimetry_status(intel_dp);
> - dev_priv->psr.su_x_granularity =
> + intel_dp->psr.su_x_granularity =
> intel_dp_get_su_x_granulartiy(intel_dp);
> }
> }
> @@ -373,7 +367,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
> BUILD_BUG_ON(sizeof(aux_msg) > 20);
> for (i = 0; i < sizeof(aux_msg); i += 4)
> intel_de_write(dev_priv,
> - EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2),
> + EDP_PSR_AUX_DATA(intel_dp->psr.transcoder, i >> 2),
> intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
>
> aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
> @@ -384,7 +378,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
>
> /* Select only valid bits for SRD_AUX_CTL */
> aux_ctl &= psr_aux_mask;
> - intel_de_write(dev_priv, EDP_PSR_AUX_CTL(dev_priv->psr.transcoder),
> + intel_de_write(dev_priv, EDP_PSR_AUX_CTL(intel_dp->psr.transcoder),
> aux_ctl);
> }
>
> @@ -394,14 +388,14 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
> u8 dpcd_val = DP_PSR_ENABLE;
>
> /* Enable ALPM at sink for psr2 */
> - if (dev_priv->psr.psr2_enabled) {
> + if (intel_dp->psr.psr2_enabled) {
> drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
> DP_ALPM_ENABLE |
> DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
>
> dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
> } else {
> - if (dev_priv->psr.link_standby)
> + if (intel_dp->psr.link_standby)
> dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
>
> if (INTEL_GEN(dev_priv) >= 8)
> @@ -464,7 +458,7 @@ static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
> * off-by-one issue that HW has in some cases.
> */
> idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
> - idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
> + idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
>
> if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
> idle_frames = 0xf;
> @@ -484,7 +478,7 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
> if (IS_HASWELL(dev_priv))
> val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
>
> - if (dev_priv->psr.link_standby)
> + if (intel_dp->psr.link_standby)
> val |= EDP_PSR_LINK_STANDBY;
>
> val |= intel_psr1_get_tp_time(intel_dp);
> @@ -492,9 +486,9 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
> if (INTEL_GEN(dev_priv) >= 8)
> val |= EDP_PSR_CRC_ENABLE;
>
> - val |= (intel_de_read(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder)) &
> + val |= (intel_de_read(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder)) &
> EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
> - intel_de_write(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder), val);
> + intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), val);
> }
>
> static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
> @@ -529,7 +523,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
> if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
> val |= EDP_Y_COORDINATE_ENABLE;
>
> - val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
> + val |= EDP_PSR2_FRAME_BEFORE_SU(intel_dp->psr.sink_sync_latency + 1);
> val |= intel_psr2_get_tp_time(intel_dp);
>
> if (INTEL_GEN(dev_priv) >= 12) {
> @@ -548,7 +542,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
> val |= EDP_PSR2_FAST_WAKE(7);
> }
>
> - if (dev_priv->psr.psr2_sel_fetch_enabled) {
> + if (intel_dp->psr.psr2_sel_fetch_enabled) {
> /* WA 1408330847 */
> if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0) ||
> IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0))
> @@ -557,20 +551,20 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
> DIS_RAM_BYPASS_PSR2_MAN_TRACK);
>
> intel_de_write(dev_priv,
> - PSR2_MAN_TRK_CTL(dev_priv->psr.transcoder),
> + PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
> PSR2_MAN_TRK_CTL_ENABLE);
> } else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
> intel_de_write(dev_priv,
> - PSR2_MAN_TRK_CTL(dev_priv->psr.transcoder), 0);
> + PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), 0);
> }
>
> /*
> * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is
> * recommending keep this bit unset while PSR2 is enabled.
> */
> - intel_de_write(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder), 0);
> + intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), 0);
>
> - intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
> + intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
> }
>
> static bool
> @@ -593,55 +587,58 @@ static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
> drm_mode_vrefresh(&cstate->hw.adjusted_mode));
> }
>
> -static void psr2_program_idle_frames(struct drm_i915_private *dev_priv,
> +static void psr2_program_idle_frames(struct intel_dp *intel_dp,
> u32 idle_frames)
> {
> + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
> u32 val;
>
> idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT;
> - val = intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder));
> + val = intel_de_read(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder));
> val &= ~EDP_PSR2_IDLE_FRAME_MASK;
> val |= idle_frames;
> - intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
> + intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
> }
>
> -static void tgl_psr2_enable_dc3co(struct drm_i915_private *dev_priv)
> +static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
> {
> - psr2_program_idle_frames(dev_priv, 0);
> + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
> +
> + psr2_program_idle_frames(intel_dp, 0);
> intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
> }
>
> -static void tgl_psr2_disable_dc3co(struct drm_i915_private *dev_priv)
> +static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
> {
> - struct intel_dp *intel_dp = dev_priv->psr.dp;
> + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
>
> intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
> - psr2_program_idle_frames(dev_priv, psr_compute_idle_frames(intel_dp));
> + psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
> }
>
> static void tgl_dc3co_disable_work(struct work_struct *work)
> {
> - struct drm_i915_private *dev_priv =
> - container_of(work, typeof(*dev_priv), psr.dc3co_work.work);
> + struct intel_dp *intel_dp =
> + container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
>
> - mutex_lock(&dev_priv->psr.lock);
> + mutex_lock(&intel_dp->psr.lock);
> /* If delayed work is pending, it is not idle */
> - if (delayed_work_pending(&dev_priv->psr.dc3co_work))
> + if (delayed_work_pending(&intel_dp->psr.dc3co_work))
> goto unlock;
>
> - tgl_psr2_disable_dc3co(dev_priv);
> + tgl_psr2_disable_dc3co(intel_dp);
> unlock:
> - mutex_unlock(&dev_priv->psr.lock);
> + mutex_unlock(&intel_dp->psr.lock);
> }
>
> -static void tgl_disallow_dc3co_on_psr2_exit(struct drm_i915_private *dev_priv)
> +static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
> {
> - if (!dev_priv->psr.dc3co_enabled)
> + if (!intel_dp->psr.dc3co_enabled)
> return;
>
> - cancel_delayed_work(&dev_priv->psr.dc3co_work);
> + cancel_delayed_work(&intel_dp->psr.dc3co_work);
> /* Before PSR2 exit disallow dc3co*/
> - tgl_psr2_disable_dc3co(dev_priv);
> + tgl_psr2_disable_dc3co(intel_dp);
> }
>
> static void
> @@ -714,7 +711,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
> int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
> int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
>
> - if (!dev_priv->psr.sink_psr2_support)
> + if (!intel_dp->psr.sink_psr2_support)
> return false;
>
> if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
> @@ -724,7 +721,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
> return false;
> }
>
> - if (!psr2_global_enabled(dev_priv)) {
> + if (!psr2_global_enabled(intel_dp)) {
> drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
> return false;
> }
> @@ -773,10 +770,10 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
> * only need to validate the SU block width is a multiple of
> * x granularity.
> */
> - if (crtc_hdisplay % dev_priv->psr.su_x_granularity) {
> + if (crtc_hdisplay % intel_dp->psr.su_x_granularity) {
> drm_dbg_kms(&dev_priv->drm,
> "PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
> - crtc_hdisplay, dev_priv->psr.su_x_granularity);
> + crtc_hdisplay, intel_dp->psr.su_x_granularity);
> return false;
> }
>
> @@ -811,30 +808,25 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
> &crtc_state->hw.adjusted_mode;
> int psr_setup_time;
>
> - if (!CAN_PSR(dev_priv))
> - return;
> -
> - if (intel_dp != dev_priv->psr.dp)
> + if (!CAN_PSR(intel_dp))
> return;
>
> - if (!psr_global_enabled(dev_priv)) {
> + if (!psr_global_enabled(intel_dp)) {
> drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
> return;
> }
>
> /*
> * HSW spec explicitly says PSR is tied to port A.
> - * BDW+ platforms have a instance of PSR registers per transcoder but
> - * for now it only supports one instance of PSR, so lets keep it
> - * hardcoded to PORT_A
> + * BDW+ platforms have a instance of PSR registers per transcoder.
> */
> - if (dig_port->base.port != PORT_A) {
> + if (IS_HASWELL(dev_priv) && dig_port->base.port != PORT_A) {
> drm_dbg_kms(&dev_priv->drm,
> "PSR condition failed: Port not supported\n");
> return;
> }
>
> - if (dev_priv->psr.sink_not_reliable) {
> + if (intel_dp->psr.sink_not_reliable) {
> drm_dbg_kms(&dev_priv->drm,
> "PSR sink implementation is not reliable\n");
> return;
> @@ -870,23 +862,24 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
> static void intel_psr_activate(struct intel_dp *intel_dp)
> {
> struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
> + enum transcoder transcoder = intel_dp->psr.transcoder;
>
> - if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder))
> + if (transcoder_has_psr2(dev_priv, transcoder))
> drm_WARN_ON(&dev_priv->drm,
> - intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE);
> + intel_de_read(dev_priv, EDP_PSR2_CTL(transcoder)) & EDP_PSR2_ENABLE);
>
> drm_WARN_ON(&dev_priv->drm,
> - intel_de_read(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE);
> - drm_WARN_ON(&dev_priv->drm, dev_priv->psr.active);
> - lockdep_assert_held(&dev_priv->psr.lock);
> + intel_de_read(dev_priv, EDP_PSR_CTL(transcoder)) & EDP_PSR_ENABLE);
> + drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
> + lockdep_assert_held(&intel_dp->psr.lock);
>
> /* psr1 and psr2 are mutually exclusive.*/
> - if (dev_priv->psr.psr2_enabled)
> + if (intel_dp->psr.psr2_enabled)
> hsw_activate_psr2(intel_dp);
> else
> hsw_activate_psr1(intel_dp);
>
> - dev_priv->psr.active = true;
> + intel_dp->psr.active = true;
> }
>
> static void intel_psr_enable_source(struct intel_dp *intel_dp,
> @@ -902,7 +895,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
> if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
> hsw_psr_setup_aux(intel_dp);
>
> - if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
> + if (intel_dp->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
> !IS_GEMINILAKE(dev_priv))) {
> i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
> u32 chicken = intel_de_read(dev_priv, reg);
> @@ -926,10 +919,10 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
> if (INTEL_GEN(dev_priv) < 11)
> mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
>
> - intel_de_write(dev_priv, EDP_PSR_DEBUG(dev_priv->psr.transcoder),
> + intel_de_write(dev_priv, EDP_PSR_DEBUG(intel_dp->psr.transcoder),
> mask);
>
> - psr_irq_control(dev_priv);
> + psr_irq_control(intel_dp);
>
> if (crtc_state->dc3co_exitline) {
> u32 val;
> @@ -947,30 +940,30 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
>
> if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
> intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
> - dev_priv->psr.psr2_sel_fetch_enabled ?
> + intel_dp->psr.psr2_sel_fetch_enabled ?
> IGNORE_PSR2_HW_TRACKING : 0);
> }
>
> -static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
> +static void intel_psr_enable_locked(struct intel_dp *intel_dp,
> const struct intel_crtc_state *crtc_state,
> const struct drm_connector_state *conn_state)
> {
> - struct intel_dp *intel_dp = dev_priv->psr.dp;
> + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
> struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
> struct intel_encoder *encoder = &dig_port->base;
> u32 val;
>
> - drm_WARN_ON(&dev_priv->drm, dev_priv->psr.enabled);
> + drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
>
> - dev_priv->psr.psr2_enabled = crtc_state->has_psr2;
> - dev_priv->psr.busy_frontbuffer_bits = 0;
> - dev_priv->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
> - dev_priv->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
> - dev_priv->psr.transcoder = crtc_state->cpu_transcoder;
> + intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
> + intel_dp->psr.busy_frontbuffer_bits = 0;
> + intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
> + intel_dp->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
> + intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
> /* DC5/DC6 requires at least 6 idle frames */
> val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
> - dev_priv->psr.dc3co_exit_delay = val;
> - dev_priv->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
> + intel_dp->psr.dc3co_exit_delay = val;
> + intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
>
> /*
> * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
> @@ -982,27 +975,27 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
> */
> if (INTEL_GEN(dev_priv) >= 12) {
> val = intel_de_read(dev_priv,
> - TRANS_PSR_IIR(dev_priv->psr.transcoder));
> + TRANS_PSR_IIR(intel_dp->psr.transcoder));
> val &= EDP_PSR_ERROR(0);
> } else {
> val = intel_de_read(dev_priv, EDP_PSR_IIR);
> - val &= EDP_PSR_ERROR(dev_priv->psr.transcoder);
> + val &= EDP_PSR_ERROR(intel_dp->psr.transcoder);
> }
> if (val) {
> - dev_priv->psr.sink_not_reliable = true;
> + intel_dp->psr.sink_not_reliable = true;
> drm_dbg_kms(&dev_priv->drm,
> "PSR interruption error set, not enabling PSR\n");
> return;
> }
>
> drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
> - dev_priv->psr.psr2_enabled ? "2" : "1");
> + intel_dp->psr.psr2_enabled ? "2" : "1");
> intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
> - &dev_priv->psr.vsc);
> - intel_write_dp_vsc_sdp(encoder, crtc_state, &dev_priv->psr.vsc);
> + &intel_dp->psr.vsc);
> + intel_write_dp_vsc_sdp(encoder, crtc_state, &intel_dp->psr.vsc);
> intel_psr_enable_sink(intel_dp);
> intel_psr_enable_source(intel_dp, crtc_state);
> - dev_priv->psr.enabled = true;
> + intel_dp->psr.enabled = true;
>
> intel_psr_activate(intel_dp);
> }
> @@ -1021,7 +1014,7 @@ void intel_psr_enable(struct intel_dp *intel_dp,
> {
> struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
>
> - if (!CAN_PSR(dev_priv) || dev_priv->psr.dp != intel_dp)
> + if (!CAN_PSR(intel_dp))
> return;
>
> if (!crtc_state->has_psr)
> @@ -1029,46 +1022,47 @@ void intel_psr_enable(struct intel_dp *intel_dp,
>
> drm_WARN_ON(&dev_priv->drm, dev_priv->drrs.dp);
>
> - mutex_lock(&dev_priv->psr.lock);
> - intel_psr_enable_locked(dev_priv, crtc_state, conn_state);
> - mutex_unlock(&dev_priv->psr.lock);
> + mutex_lock(&intel_dp->psr.lock);
> + intel_psr_enable_locked(intel_dp, crtc_state, conn_state);
> + mutex_unlock(&intel_dp->psr.lock);
> }
>
> -static void intel_psr_exit(struct drm_i915_private *dev_priv)
> +static void intel_psr_exit(struct intel_dp *intel_dp)
> {
> + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
> u32 val;
>
> - if (!dev_priv->psr.active) {
> - if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) {
> + if (!intel_dp->psr.active) {
> + if (transcoder_has_psr2(dev_priv, intel_dp->psr.transcoder)) {
> val = intel_de_read(dev_priv,
> - EDP_PSR2_CTL(dev_priv->psr.transcoder));
> + EDP_PSR2_CTL(intel_dp->psr.transcoder));
> drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
> }
>
> val = intel_de_read(dev_priv,
> - EDP_PSR_CTL(dev_priv->psr.transcoder));
> + EDP_PSR_CTL(intel_dp->psr.transcoder));
> drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
>
> return;
> }
>
> - if (dev_priv->psr.psr2_enabled) {
> - tgl_disallow_dc3co_on_psr2_exit(dev_priv);
> + if (intel_dp->psr.psr2_enabled) {
> + tgl_disallow_dc3co_on_psr2_exit(intel_dp);
> val = intel_de_read(dev_priv,
> - EDP_PSR2_CTL(dev_priv->psr.transcoder));
> + EDP_PSR2_CTL(intel_dp->psr.transcoder));
> drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
> val &= ~EDP_PSR2_ENABLE;
> intel_de_write(dev_priv,
> - EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
> + EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
> } else {
> val = intel_de_read(dev_priv,
> - EDP_PSR_CTL(dev_priv->psr.transcoder));
> + EDP_PSR_CTL(intel_dp->psr.transcoder));
> drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
> val &= ~EDP_PSR_ENABLE;
> intel_de_write(dev_priv,
> - EDP_PSR_CTL(dev_priv->psr.transcoder), val);
> + EDP_PSR_CTL(intel_dp->psr.transcoder), val);
> }
> - dev_priv->psr.active = false;
> + intel_dp->psr.active = false;
> }
>
> static void intel_psr_disable_locked(struct intel_dp *intel_dp)
> @@ -1077,21 +1071,21 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
> i915_reg_t psr_status;
> u32 psr_status_mask;
>
> - lockdep_assert_held(&dev_priv->psr.lock);
> + lockdep_assert_held(&intel_dp->psr.lock);
>
> - if (!dev_priv->psr.enabled)
> + if (!intel_dp->psr.enabled)
> return;
>
> drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
> - dev_priv->psr.psr2_enabled ? "2" : "1");
> + intel_dp->psr.psr2_enabled ? "2" : "1");
>
> - intel_psr_exit(dev_priv);
> + intel_psr_exit(intel_dp);
>
> - if (dev_priv->psr.psr2_enabled) {
> - psr_status = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
> + if (intel_dp->psr.psr2_enabled) {
> + psr_status = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
> psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
> } else {
> - psr_status = EDP_PSR_STATUS(dev_priv->psr.transcoder);
> + psr_status = EDP_PSR_STATUS(intel_dp->psr.transcoder);
> psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
> }
>
> @@ -1101,7 +1095,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
> drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
>
> /* WA 1408330847 */
> - if (dev_priv->psr.psr2_sel_fetch_enabled &&
> + if (intel_dp->psr.psr2_sel_fetch_enabled &&
> (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0) ||
> IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0)))
> intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
> @@ -1110,10 +1104,10 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
> /* Disable PSR on Sink */
> drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
>
> - if (dev_priv->psr.psr2_enabled)
> + if (intel_dp->psr.psr2_enabled)
> drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
>
> - dev_priv->psr.enabled = false;
> + intel_dp->psr.enabled = false;
> }
>
> /**
> @@ -1131,20 +1125,22 @@ void intel_psr_disable(struct intel_dp *intel_dp,
> if (!old_crtc_state->has_psr)
> return;
>
> - if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(dev_priv)))
> + if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
> return;
>
> - mutex_lock(&dev_priv->psr.lock);
> + mutex_lock(&intel_dp->psr.lock);
>
> intel_psr_disable_locked(intel_dp);
>
> - mutex_unlock(&dev_priv->psr.lock);
> - cancel_work_sync(&dev_priv->psr.work);
> - cancel_delayed_work_sync(&dev_priv->psr.dc3co_work);
> + mutex_unlock(&intel_dp->psr.lock);
> + cancel_work_sync(&intel_dp->psr.work);
> + cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
> }
>
> -static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
> +static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
> {
> + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
> +
> if (IS_TIGERLAKE(dev_priv))
> /*
> * Writes to CURSURFLIVE in TGL are causing IOMMU errors and
> @@ -1158,7 +1154,7 @@ static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
> * So using this workaround until this issue is root caused
> * and a better fix is found.
> */
> - intel_psr_exit(dev_priv);
> + intel_psr_exit(intel_dp);
> else if (INTEL_GEN(dev_priv) >= 9)
> /*
> * Display WA #0884: skl+
> @@ -1169,13 +1165,13 @@ static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
> * but it makes more sense write to the current active
> * pipe.
> */
> - intel_de_write(dev_priv, CURSURFLIVE(dev_priv->psr.pipe), 0);
> + intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
> else
> /*
> * A write to CURSURFLIVE do not cause HW tracking to exit PSR
> * on older gens so doing the manual exit instead.
> */
> - intel_psr_exit(dev_priv);
> + intel_psr_exit(intel_dp);
> }
>
> void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
> @@ -1215,11 +1211,11 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
> intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
> }
>
> -void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
> +void intel_psr2_program_trans_man_trk_ctl(struct intel_dp *intel_dp,
> + const struct intel_crtc_state *crtc_state)
> {
> - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
> - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
> - struct i915_psr *psr = &dev_priv->psr;
> + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
> + struct intel_psr *psr = &intel_dp->psr;
>
> if (!HAS_PSR2_SEL_FETCH(dev_priv) ||
> !crtc_state->enable_psr2_sel_fetch)
> @@ -1336,13 +1332,13 @@ void intel_psr_update(struct intel_dp *intel_dp,
> const struct drm_connector_state *conn_state)
> {
> struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
> - struct i915_psr *psr = &dev_priv->psr;
> + struct intel_psr *psr = &intel_dp->psr;
> bool enable, psr2_enable;
>
> - if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp)
> + if (!CAN_PSR(intel_dp))
> return;
>
> - mutex_lock(&dev_priv->psr.lock);
> + mutex_lock(&intel_dp->psr.lock);
>
> enable = crtc_state->has_psr;
> psr2_enable = crtc_state->has_psr2;
> @@ -1350,15 +1346,15 @@ void intel_psr_update(struct intel_dp *intel_dp,
> if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) {
> /* Force a PSR exit when enabling CRC to avoid CRC timeouts */
> if (crtc_state->crc_enabled && psr->enabled)
> - psr_force_hw_tracking_exit(dev_priv);
> + psr_force_hw_tracking_exit(intel_dp);
> else if (INTEL_GEN(dev_priv) < 9 && psr->enabled) {
> /*
> * Activate PSR again after a force exit when enabling
> * CRC in older gens
> */
> - if (!dev_priv->psr.active &&
> - !dev_priv->psr.busy_frontbuffer_bits)
> - schedule_work(&dev_priv->psr.work);
> + if (!intel_dp->psr.active &&
> + !intel_dp->psr.busy_frontbuffer_bits)
> + schedule_work(&intel_dp->psr.work);
> }
>
> goto unlock;
> @@ -1368,34 +1364,23 @@ void intel_psr_update(struct intel_dp *intel_dp,
> intel_psr_disable_locked(intel_dp);
>
> if (enable)
> - intel_psr_enable_locked(dev_priv, crtc_state, conn_state);
> + intel_psr_enable_locked(intel_dp, crtc_state, conn_state);
>
> unlock:
> - mutex_unlock(&dev_priv->psr.lock);
> + mutex_unlock(&intel_dp->psr.lock);
> }
>
> /**
> - * intel_psr_wait_for_idle - wait for PSR1 to idle
> - * @new_crtc_state: new CRTC state
> + * psr_wait_for_idle - wait for PSR1 to idle
> + * @intel_dp: Intel DP
> * @out_value: PSR status in case of failure
> *
> - * This function is expected to be called from pipe_update_start() where it is
> - * not expected to race with PSR enable or disable.
> - *
> * Returns: 0 on success or -ETIMEOUT if PSR status does not idle.
> + *
> */
> -int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
> - u32 *out_value)
> +static int psr_wait_for_idle(struct intel_dp *intel_dp, u32 *out_value)
> {
> - struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
> - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
> -
> - if (!dev_priv->psr.enabled || !new_crtc_state->has_psr)
> - return 0;
> -
> - /* FIXME: Update this for PSR2 if we need to wait for idle */
> - if (READ_ONCE(dev_priv->psr.psr2_enabled))
> - return 0;
> + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
>
> /*
> * From bspec: Panel Self Refresh (BDW+)
> @@ -1403,32 +1388,64 @@ int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
> * exit training time + 1.5 ms of aux channel handshake. 50 ms is
> * defensive enough to cover everything.
> */
> -
> return __intel_wait_for_register(&dev_priv->uncore,
> - EDP_PSR_STATUS(dev_priv->psr.transcoder),
> + EDP_PSR_STATUS(intel_dp->psr.transcoder),
> EDP_PSR_STATUS_STATE_MASK,
> EDP_PSR_STATUS_STATE_IDLE, 2, 50,
> out_value);
> }
>
> -static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
> +/**
> + * intel_psr_wait_for_idle - wait for PSR1 to idle
> + * @new_crtc_state: new CRTC state
> + *
> + * This function is expected to be called from pipe_update_start() where it is
> + * not expected to race with PSR enable or disable.
> + */
> +void intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state)
> +{
> + struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
> + struct intel_encoder *encoder;
> + u32 psr_status;
> +
> + if (!new_crtc_state->has_psr)
> + return;
> +
> + for_each_intel_dp(&dev_priv->drm, encoder) {
> + struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
> +
> + if (encoder->type != INTEL_OUTPUT_EDP)
> + continue;
> +
> + /* when the PSR1 is enabled */
> + if (intel_dp->psr.enabled && !intel_dp->psr.psr2_enabled) {
> + if (psr_wait_for_idle(intel_dp, &psr_status))
> + drm_err(&dev_priv->drm,
> + "PSR idle timed out 0x%x, atomic update may fail\n",
> + psr_status);
> + }
> + }
> +}
> +
> +static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
> {
> i915_reg_t reg;
> u32 mask;
> int err;
> + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
>
> - if (!dev_priv->psr.enabled)
> + if (!intel_dp->psr.enabled)
> return false;
>
> - if (dev_priv->psr.psr2_enabled) {
> - reg = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
> + if (intel_dp->psr.psr2_enabled) {
> + reg = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
> mask = EDP_PSR2_STATUS_STATE_MASK;
> } else {
> - reg = EDP_PSR_STATUS(dev_priv->psr.transcoder);
> + reg = EDP_PSR_STATUS(intel_dp->psr.transcoder);
> mask = EDP_PSR_STATUS_STATE_MASK;
> }
>
> - mutex_unlock(&dev_priv->psr.lock);
> + mutex_unlock(&intel_dp->psr.lock);
>
> err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
> if (err)
> @@ -1436,8 +1453,8 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
> "Timed out waiting for PSR Idle for re-enable\n");
>
> /* After the unlocked wait, verify that PSR is still wanted! */
> - mutex_lock(&dev_priv->psr.lock);
> - return err == 0 && dev_priv->psr.enabled;
> + mutex_lock(&intel_dp->psr.lock);
> + return err == 0 && intel_dp->psr.enabled;
> }
>
> static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
> @@ -1503,11 +1520,12 @@ static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
> return err;
> }
>
> -int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
> +int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
> {
> const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
> u32 old_mode;
> int ret;
> + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
>
> if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
> mode > I915_PSR_DEBUG_FORCE_PSR1) {
> @@ -1515,21 +1533,21 @@ int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
> return -EINVAL;
> }
>
> - ret = mutex_lock_interruptible(&dev_priv->psr.lock);
> + ret = mutex_lock_interruptible(&intel_dp->psr.lock);
> if (ret)
> return ret;
>
> - old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK;
> - dev_priv->psr.debug = val;
> + old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
> + intel_dp->psr.debug = val;
>
> /*
> * Do it right away if it's already enabled, otherwise it will be done
> * when enabling the source.
> */
> - if (dev_priv->psr.enabled)
> - psr_irq_control(dev_priv);
> + if (intel_dp->psr.enabled)
> + psr_irq_control(intel_dp);
>
> - mutex_unlock(&dev_priv->psr.lock);
> + mutex_unlock(&intel_dp->psr.lock);
>
> if (old_mode != mode)
> ret = intel_psr_fastset_force(dev_priv);
> @@ -1537,28 +1555,28 @@ int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
> return ret;
> }
>
> -static void intel_psr_handle_irq(struct drm_i915_private *dev_priv)
> +static void intel_psr_handle_irq(struct intel_dp *intel_dp)
> {
> - struct i915_psr *psr = &dev_priv->psr;
> + struct intel_psr *psr = &intel_dp->psr;
>
> - intel_psr_disable_locked(psr->dp);
> + intel_psr_disable_locked(intel_dp);
> psr->sink_not_reliable = true;
> /* let's make sure that sink is awaken */
> - drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
> + drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
> }
>
> static void intel_psr_work(struct work_struct *work)
> {
> - struct drm_i915_private *dev_priv =
> - container_of(work, typeof(*dev_priv), psr.work);
> + struct intel_dp *intel_dp =
> + container_of(work, typeof(*intel_dp), psr.work);
>
> - mutex_lock(&dev_priv->psr.lock);
> + mutex_lock(&intel_dp->psr.lock);
>
> - if (!dev_priv->psr.enabled)
> + if (!intel_dp->psr.enabled)
> goto unlock;
>
> - if (READ_ONCE(dev_priv->psr.irq_aux_error))
> - intel_psr_handle_irq(dev_priv);
> + if (READ_ONCE(intel_dp->psr.irq_aux_error))
> + intel_psr_handle_irq(intel_dp);
>
> /*
> * We have to make sure PSR is ready for re-enable
> @@ -1566,7 +1584,7 @@ static void intel_psr_work(struct work_struct *work)
> * PSR might take some time to get fully disabled
> * and be ready for re-enable.
> */
> - if (!__psr_wait_for_idle_locked(dev_priv))
> + if (!__psr_wait_for_idle_locked(intel_dp))
> goto unlock;
>
> /*
> @@ -1574,12 +1592,12 @@ static void intel_psr_work(struct work_struct *work)
> * recheck. Since psr_flush first clears this and then reschedules we
> * won't ever miss a flush when bailing out here.
> */
> - if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active)
> + if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
> goto unlock;
>
> - intel_psr_activate(dev_priv->psr.dp);
> + intel_psr_activate(intel_dp);
> unlock:
> - mutex_unlock(&dev_priv->psr.lock);
> + mutex_unlock(&intel_dp->psr.lock);
> }
>
> /**
> @@ -1598,27 +1616,35 @@ static void intel_psr_work(struct work_struct *work)
> void intel_psr_invalidate(struct drm_i915_private *dev_priv,
> unsigned frontbuffer_bits, enum fb_op_origin origin)
> {
> - if (!CAN_PSR(dev_priv))
> - return;
> + struct intel_encoder *encoder;
> + struct intel_dp *intel_dp;
>
> - if (origin == ORIGIN_FLIP)
> - return;
> + for_each_intel_dp(&dev_priv->drm, encoder) {
>
> - mutex_lock(&dev_priv->psr.lock);
> - if (!dev_priv->psr.enabled) {
> - mutex_unlock(&dev_priv->psr.lock);
> - return;
> - }
> + intel_dp = enc_to_intel_dp(encoder);
> + if (encoder->type != INTEL_OUTPUT_EDP)
> + continue;
> + if (!CAN_PSR(intel_dp))
> + continue;
>
> - frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
> - dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
> + if (origin == ORIGIN_FLIP)
> + continue;
> +
> + mutex_lock(&intel_dp->psr.lock);
> + if (!intel_dp->psr.enabled) {
> + mutex_unlock(&intel_dp->psr.lock);
> + continue;
> + }
>
> - if (frontbuffer_bits)
> - intel_psr_exit(dev_priv);
> + frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
> + intel_dp->psr.busy_frontbuffer_bits |= frontbuffer_bits;
>
> - mutex_unlock(&dev_priv->psr.lock);
> -}
> + if (frontbuffer_bits)
> + intel_psr_exit(intel_dp);
>
> + mutex_unlock(&intel_dp->psr.lock);
> + }
> +}
> /*
> * When we will be completely rely on PSR2 S/W tracking in future,
> * intel_psr_flush() will invalidate and flush the PSR for ORIGIN_FLIP
> @@ -1626,15 +1652,15 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
> * accordingly in future.
> */
> static void
> -tgl_dc3co_flush(struct drm_i915_private *dev_priv,
> - unsigned int frontbuffer_bits, enum fb_op_origin origin)
> +tgl_dc3co_flush(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
> + enum fb_op_origin origin)
> {
> - mutex_lock(&dev_priv->psr.lock);
> + mutex_lock(&intel_dp->psr.lock);
>
> - if (!dev_priv->psr.dc3co_enabled)
> + if (!intel_dp->psr.dc3co_enabled)
> goto unlock;
>
> - if (!dev_priv->psr.psr2_enabled || !dev_priv->psr.active)
> + if (!intel_dp->psr.psr2_enabled || !intel_dp->psr.active)
> goto unlock;
>
> /*
> @@ -1642,15 +1668,15 @@ tgl_dc3co_flush(struct drm_i915_private *dev_priv,
> * when delayed work schedules that means display has been idle.
> */
> if (!(frontbuffer_bits &
> - INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe)))
> + INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
> goto unlock;
>
> - tgl_psr2_enable_dc3co(dev_priv);
> - mod_delayed_work(system_wq, &dev_priv->psr.dc3co_work,
> - dev_priv->psr.dc3co_exit_delay);
> + tgl_psr2_enable_dc3co(intel_dp);
> + mod_delayed_work(system_wq, &intel_dp->psr.dc3co_work,
> + intel_dp->psr.dc3co_exit_delay);
>
> unlock:
> - mutex_unlock(&dev_priv->psr.lock);
> + mutex_unlock(&intel_dp->psr.lock);
> }
>
> /**
> @@ -1669,45 +1695,54 @@ tgl_dc3co_flush(struct drm_i915_private *dev_priv,
> void intel_psr_flush(struct drm_i915_private *dev_priv,
> unsigned frontbuffer_bits, enum fb_op_origin origin)
> {
> - if (!CAN_PSR(dev_priv))
> - return;
> -
> - if (origin == ORIGIN_FLIP) {
> - tgl_dc3co_flush(dev_priv, frontbuffer_bits, origin);
> - return;
> - }
> -
> - mutex_lock(&dev_priv->psr.lock);
> - if (!dev_priv->psr.enabled) {
> - mutex_unlock(&dev_priv->psr.lock);
> - return;
> + struct intel_encoder *encoder;
> + struct intel_dp *intel_dp;
> +
> + for_each_intel_dp(&dev_priv->drm, encoder) {
> + intel_dp = enc_to_intel_dp(encoder);
> +
> + if (encoder->type == INTEL_OUTPUT_EDP && CAN_PSR(intel_dp)) {
> + if (origin == ORIGIN_FLIP) {
> + tgl_dc3co_flush(intel_dp, frontbuffer_bits, origin);
> + continue;
> + }
> +
> + mutex_lock(&intel_dp->psr.lock);
> + if (!intel_dp->psr.enabled) {
> + mutex_unlock(&intel_dp->psr.lock);
> + continue;
> + }
> +
> + frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
> + intel_dp->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
> +
> + /* By definition flush = invalidate + flush */
> + if (frontbuffer_bits)
> + psr_force_hw_tracking_exit(intel_dp);
> +
> + if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
> + schedule_work(&intel_dp->psr.work);
> + mutex_unlock(&intel_dp->psr.lock);
> + }
> }
> -
> - frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
> - dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
> -
> - /* By definition flush = invalidate + flush */
> - if (frontbuffer_bits)
> - psr_force_hw_tracking_exit(dev_priv);
> -
> - if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
> - schedule_work(&dev_priv->psr.work);
> - mutex_unlock(&dev_priv->psr.lock);
> }
>
> /**
> * intel_psr_init - Init basic PSR work and mutex.
> - * @dev_priv: i915 device private
> + * @intel_dp: Intel DP
> *
> - * This function is called only once at driver load to initialize basic
> - * PSR stuff.
> + * This function is called after initializing the connector
> + * (the connector initialization handles the connector capabilities),
> + * and it initializes basic PSR stuff for each DP encoder.
> */
> -void intel_psr_init(struct drm_i915_private *dev_priv)
> +void intel_psr_init(struct intel_dp *intel_dp)
> {
> + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
> +
> if (!HAS_PSR(dev_priv))
> return;
>
> - if (!dev_priv->psr.sink_support)
> + if (!intel_dp->psr.sink_support)
> return;
>
> if (IS_HASWELL(dev_priv))
> @@ -1725,14 +1760,14 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
> /* Set link_standby x link_off defaults */
> if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
> /* HSW and BDW require workarounds that we don't implement. */
> - dev_priv->psr.link_standby = false;
> + intel_dp->psr.link_standby = false;
> else if (INTEL_GEN(dev_priv) < 12)
> /* For new platforms up to TGL let's respect VBT back again */
> - dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
> + intel_dp->psr.link_standby = dev_priv->vbt.psr.full_link;
>
> - INIT_WORK(&dev_priv->psr.work, intel_psr_work);
> - INIT_DELAYED_WORK(&dev_priv->psr.dc3co_work, tgl_dc3co_disable_work);
> - mutex_init(&dev_priv->psr.lock);
> + INIT_WORK(&intel_dp->psr.work, intel_psr_work);
> + INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
> + mutex_init(&intel_dp->psr.lock);
> }
>
> static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
> @@ -1758,7 +1793,7 @@ static void psr_alpm_check(struct intel_dp *intel_dp)
> {
> struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
> struct drm_dp_aux *aux = &intel_dp->aux;
> - struct i915_psr *psr = &dev_priv->psr;
> + struct intel_psr *psr = &intel_dp->psr;
> u8 val;
> int r;
>
> @@ -1785,7 +1820,7 @@ static void psr_alpm_check(struct intel_dp *intel_dp)
> static void psr_capability_changed_check(struct intel_dp *intel_dp)
> {
> struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
> - struct i915_psr *psr = &dev_priv->psr;
> + struct intel_psr *psr = &intel_dp->psr;
> u8 val;
> int r;
>
> @@ -1809,18 +1844,18 @@ static void psr_capability_changed_check(struct intel_dp *intel_dp)
> void intel_psr_short_pulse(struct intel_dp *intel_dp)
> {
> struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
> - struct i915_psr *psr = &dev_priv->psr;
> + struct intel_psr *psr = &intel_dp->psr;
> u8 status, error_status;
> const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
> DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
> DP_PSR_LINK_CRC_ERROR;
>
> - if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
> + if (!CAN_PSR(intel_dp) || !intel_dp_is_edp(intel_dp))
> return;
>
> mutex_lock(&psr->lock);
>
> - if (!psr->enabled || psr->dp != intel_dp)
> + if (!psr->enabled)
> goto exit;
>
> if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
> @@ -1863,15 +1898,14 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
>
> bool intel_psr_enabled(struct intel_dp *intel_dp)
> {
> - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
> bool ret;
>
> - if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
> + if (!CAN_PSR(intel_dp) || !intel_dp_is_edp(intel_dp))
> return false;
>
> - mutex_lock(&dev_priv->psr.lock);
> - ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled);
> - mutex_unlock(&dev_priv->psr.lock);
> + mutex_lock(&intel_dp->psr.lock);
> + ret = intel_dp->psr.enabled;
> + mutex_unlock(&intel_dp->psr.lock);
>
> return ret;
> }
> diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
> index 0a517978e8af..03eb19547d09 100644
> --- a/drivers/gpu/drm/i915/display/intel_psr.h
> +++ b/drivers/gpu/drm/i915/display/intel_psr.h
> @@ -18,7 +18,7 @@ struct intel_atomic_state;
> struct intel_plane_state;
> struct intel_plane;
>
> -#define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
> +#define CAN_PSR(intel_dp) (HAS_PSR(dp_to_i915(intel_dp)) && intel_dp->psr.sink_support)
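Tiny nit: now that CAN_PSR() takes an expression rather than dev_priv, it
might be worth parenthesizing the argument on the right-hand side as well,
the usual macro hygiene (untested sketch):

#define CAN_PSR(intel_dp) (HAS_PSR(dp_to_i915(intel_dp)) && (intel_dp)->psr.sink_support)

No functional difference for the current callers, feel free to ignore.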
> void intel_psr_init_dpcd(struct intel_dp *intel_dp);
> void intel_psr_enable(struct intel_dp *intel_dp,
> const struct intel_crtc_state *crtc_state,
> @@ -28,24 +28,24 @@ void intel_psr_disable(struct intel_dp *intel_dp,
> void intel_psr_update(struct intel_dp *intel_dp,
> const struct intel_crtc_state *crtc_state,
> const struct drm_connector_state *conn_state);
> -int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 value);
> +int intel_psr_debug_set(struct intel_dp *intel_dp, u64 value);
> void intel_psr_invalidate(struct drm_i915_private *dev_priv,
> unsigned frontbuffer_bits,
> enum fb_op_origin origin);
> void intel_psr_flush(struct drm_i915_private *dev_priv,
> unsigned frontbuffer_bits,
> enum fb_op_origin origin);
> -void intel_psr_init(struct drm_i915_private *dev_priv);
> +void intel_psr_init(struct intel_dp *intel_dp);
> void intel_psr_compute_config(struct intel_dp *intel_dp,
> struct intel_crtc_state *crtc_state);
> -void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
> +void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir);
> void intel_psr_short_pulse(struct intel_dp *intel_dp);
> -int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
> - u32 *out_value);
> +void intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state);
> bool intel_psr_enabled(struct intel_dp *intel_dp);
> int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
> struct intel_crtc *crtc);
> -void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state);
> +void intel_psr2_program_trans_man_trk_ctl(struct intel_dp *intel_dp,
> + const struct intel_crtc_state *crtc_state);
> void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
> const struct intel_crtc_state *crtc_state,
> const struct intel_plane_state *plane_state,
> diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
> index b7e208816074..948234c60648 100644
> --- a/drivers/gpu/drm/i915/display/intel_sprite.c
> +++ b/drivers/gpu/drm/i915/display/intel_sprite.c
> @@ -84,7 +84,6 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
> bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
> intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI);
> DEFINE_WAIT(wait);
> - u32 psr_status;
>
> if (new_crtc_state->uapi.async_flip)
> return;
> @@ -109,10 +108,7 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
> * VBL interrupts will start the PSR exit and prevent a PSR
> * re-entry as well.
> */
> - if (intel_psr_wait_for_idle(new_crtc_state, &psr_status))
> - drm_err(&dev_priv->drm,
> - "PSR idle timed out 0x%x, atomic update may fail\n",
> - psr_status);
> + intel_psr_wait_for_idle(new_crtc_state);
>
> local_irq_disable();
>
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 5d04b282c060..a2e39e8adcbe 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -475,42 +475,6 @@ struct i915_drrs {
> enum drrs_support_type type;
> };
>
> -struct i915_psr {
> - struct mutex lock;
> -
> -#define I915_PSR_DEBUG_MODE_MASK 0x0f
> -#define I915_PSR_DEBUG_DEFAULT 0x00
> -#define I915_PSR_DEBUG_DISABLE 0x01
> -#define I915_PSR_DEBUG_ENABLE 0x02
> -#define I915_PSR_DEBUG_FORCE_PSR1 0x03
> -#define I915_PSR_DEBUG_IRQ 0x10
> -
> - u32 debug;
> - bool sink_support;
> - bool enabled;
> - struct intel_dp *dp;
> - enum pipe pipe;
> - enum transcoder transcoder;
> - bool active;
> - struct work_struct work;
> - unsigned busy_frontbuffer_bits;
> - bool sink_psr2_support;
> - bool link_standby;
> - bool colorimetry_support;
> - bool psr2_enabled;
> - bool psr2_sel_fetch_enabled;
> - u8 sink_sync_latency;
> - ktime_t last_entry_attempt;
> - ktime_t last_exit;
> - bool sink_not_reliable;
> - bool irq_aux_error;
> - u16 su_x_granularity;
> - bool dc3co_enabled;
> - u32 dc3co_exit_delay;
> - struct delayed_work dc3co_work;
> - struct drm_dp_vsc_sdp vsc;
> -};
> -
> #define QUIRK_LVDS_SSC_DISABLE (1<<1)
> #define QUIRK_INVERT_BRIGHTNESS (1<<2)
> #define QUIRK_BACKLIGHT_PRESENT (1<<3)
> @@ -1041,8 +1005,6 @@ struct drm_i915_private {
>
> struct i915_power_domains power_domains;
>
> - struct i915_psr psr;
> -
> struct i915_gpu_error gpu_error;
>
> struct drm_i915_gem_object *vlv_pctx;
> diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
> index 491f82500d68..8546786521b5 100644
> --- a/drivers/gpu/drm/i915/i915_irq.c
> +++ b/drivers/gpu/drm/i915/i915_irq.c
> @@ -2044,10 +2044,22 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
> ivb_err_int_handler(dev_priv);
>
> if (de_iir & DE_EDP_PSR_INT_HSW) {
> - u32 psr_iir = intel_uncore_read(&dev_priv->uncore, EDP_PSR_IIR);
> + struct intel_encoder *encoder;
>
> - intel_psr_irq_handler(dev_priv, psr_iir);
> - intel_uncore_write(&dev_priv->uncore, EDP_PSR_IIR, psr_iir);
> + for_each_intel_dp(&dev_priv->drm, encoder) {
> + struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
> +
> + if (encoder->type == INTEL_OUTPUT_EDP &&
> + CAN_PSR(intel_dp)) {
> + u32 psr_iir = intel_uncore_read(&dev_priv->uncore,
> + EDP_PSR_IIR);
> +
> + intel_psr_irq_handler(intel_dp, psr_iir);
> + intel_uncore_write(&dev_priv->uncore,
> + EDP_PSR_IIR, psr_iir);
> + break;
> + }
> + }
> }
>
> if (de_iir & DE_AUX_CHANNEL_A_IVB)
> @@ -2255,21 +2267,34 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
> }
>
> if (iir & GEN8_DE_EDP_PSR) {
> + struct intel_encoder *encoder;
> u32 psr_iir;
> i915_reg_t iir_reg;
>
> - if (INTEL_GEN(dev_priv) >= 12)
> - iir_reg = TRANS_PSR_IIR(dev_priv->psr.transcoder);
> - else
> - iir_reg = EDP_PSR_IIR;
> + for_each_intel_dp(&dev_priv->drm, encoder) {
> + struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
>
> - psr_iir = intel_uncore_read(&dev_priv->uncore, iir_reg);
> - intel_uncore_write(&dev_priv->uncore, iir_reg, psr_iir);
> + if (INTEL_GEN(dev_priv) >= 12 && CAN_PSR(intel_dp)) {
> + iir_reg = TRANS_PSR_IIR(intel_dp->psr.transcoder);
> + } else if (encoder->type == INTEL_OUTPUT_EDP &&
> + CAN_PSR(intel_dp)) {
> + iir_reg = EDP_PSR_IIR;
> + } else {
> + continue;
> + }
> +
> + psr_iir = intel_uncore_read(&dev_priv->uncore, iir_reg);
> + intel_uncore_write(&dev_priv->uncore, iir_reg, psr_iir);
> +
> + if (psr_iir)
> + found = true;
>
> - if (psr_iir)
> - found = true;
> + intel_psr_irq_handler(intel_dp, psr_iir);
>
> - intel_psr_irq_handler(dev_priv, psr_iir);
> +			/* platforms prior to GEN12 have only one EDP PSR instance */
> + if (INTEL_GEN(dev_priv) < 12)
> + break;
> + }
> }
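On the TGL+ path this loop now reads and acks TRANS_PSR_IIR for every
PSR-capable DP and calls intel_psr_irq_handler() even when psr_iir is 0.
If the handler really is a no-op for 0 (I believe it is, but please
double-check), guarding the call would make the intent a bit clearer, e.g.:

			if (psr_iir) {
				found = true;
				intel_psr_irq_handler(intel_dp, psr_iir);
			}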
>
> if (!found)
> --
> 2.25.0
>