[PATCH 04/20] drm/i915/dp: Sanitize intel_dp_get_link_train_fallback_values()
Imre Deak
imre.deak at intel.com
Wed May 15 13:20:49 UTC 2024
On Wed, May 15, 2024 at 04:05:32PM +0300, Jani Nikula wrote:
> On Tue, 14 May 2024, Imre Deak <imre.deak at intel.com> wrote:
> > Reduce the indentation in intel_dp_get_link_train_fallback_values() by
> > adding separate helpers to reduce the link rate and lane count. Also
> > simplify things by passing crtc_state to the function.
> >
> > This also prepares for later patches in the patchset adding a limitation
> > on how the link params are reduced.
>
> Currently we have these rigid reductions of rate, lane and bpp. I don't
> think we want to lock in to this policy, but rather make it more
> flexible. So I think this patch is counter productive.
>
> My grand plan I never got around to was to create a list/array of
> possible sets of link parameters that you could trivially sort, filter,
> iterate, and remove parameters from. (And I don't mean a statically
> written one, but a dynamically generated list.)
>
> In that scheme, you'd get the fallback values by advancing one element
> in the data structure. And you know when you've exhausted all fallbacks
> when you're at the end. None of this complicated "if we've finished x,
> then try y, then z".
Yes, having a different fallback order than the current one makes sense
to me both BW utilization wise and because the standard describes it (as
an alternative?). I don't think this patch makes it more difficult to
implement that, but the change does improve the readability of the
current code.
> And then you could sort the list based on bandwidth, and go from
> uncompressed uhbr to compressed non-uhbr to whatever.
>
>
> BR,
> Jani.
>
>
>
> >
> > Signed-off-by: Imre Deak <imre.deak at intel.com>
> > ---
> > .../drm/i915/display/intel_dp_link_training.c | 82 ++++++++++++-------
> > 1 file changed, 51 insertions(+), 31 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
> > index 1b4694b46cea7..1ea4aaf9592f1 100644
> > --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
> > +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
> > @@ -1109,11 +1109,37 @@ static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
> > return true;
> > }
> >
> > +static int reduce_link_rate(struct intel_dp *intel_dp, int current_rate)
> > +{
> > + int rate_index;
> > + int new_rate;
> > +
> > + rate_index = intel_dp_rate_index(intel_dp->common_rates,
> > + intel_dp->num_common_rates,
> > + current_rate);
> > +
> > + if (rate_index <= 0)
> > + return -1;
> > +
> > + new_rate = intel_dp_common_rate(intel_dp, rate_index - 1);
> > +
> > + return new_rate;
> > +}
> > +
> > +static int reduce_lane_count(struct intel_dp *intel_dp, int current_lane_count)
> > +{
> > + if (current_lane_count > 1)
> > + return current_lane_count >> 1;
> > +
> > + return -1;
> > +}
> > +
> > static int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
> > - int link_rate, u8 lane_count)
> > + const struct intel_crtc_state *crtc_state)
> > {
> > struct drm_i915_private *i915 = dp_to_i915(intel_dp);
> > - int index;
> > + int new_link_rate;
> > + int new_lane_count;
> >
> > /*
> > * TODO: Enable fallback on MST links once MST link compute can handle
> > @@ -1131,36 +1157,32 @@ static int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
> > return 0;
> > }
> >
> > - index = intel_dp_rate_index(intel_dp->common_rates,
> > - intel_dp->num_common_rates,
> > - link_rate);
> > - if (index > 0) {
> > - if (intel_dp_is_edp(intel_dp) &&
> > - !intel_dp_can_link_train_fallback_for_edp(intel_dp,
> > - intel_dp_common_rate(intel_dp, index - 1),
> > - lane_count)) {
> > - drm_dbg_kms(&i915->drm,
> > - "Retrying Link training for eDP with same parameters\n");
> > - return 0;
> > - }
> > - intel_dp->link_train.max_rate = intel_dp_common_rate(intel_dp, index - 1);
> > - intel_dp->link_train.max_lane_count = lane_count;
> > - } else if (lane_count > 1) {
> > - if (intel_dp_is_edp(intel_dp) &&
> > - !intel_dp_can_link_train_fallback_for_edp(intel_dp,
> > - intel_dp_max_common_rate(intel_dp),
> > - lane_count >> 1)) {
> > - drm_dbg_kms(&i915->drm,
> > - "Retrying Link training for eDP with same parameters\n");
> > - return 0;
> > - }
> > - intel_dp->link_train.max_rate = intel_dp_max_common_rate(intel_dp);
> > - intel_dp->link_train.max_lane_count = lane_count >> 1;
> > - } else {
> > + new_lane_count = crtc_state->lane_count;
> > + new_link_rate = reduce_link_rate(intel_dp, crtc_state->port_clock);
> > + if (new_link_rate < 0) {
> > + new_lane_count = reduce_lane_count(intel_dp, crtc_state->lane_count);
> > + new_link_rate = intel_dp_max_common_rate(intel_dp);
> > + }
> > +
> > + if (new_lane_count < 0) {
> > drm_err(&i915->drm, "Link Training Unsuccessful\n");
> > return -1;
> > }
> >
> > + if (intel_dp_is_edp(intel_dp) &&
> > + !intel_dp_can_link_train_fallback_for_edp(intel_dp, new_link_rate, new_lane_count)) {
> > + drm_dbg_kms(&i915->drm,
> > + "Retrying Link training for eDP with same parameters\n");
> > + return 0;
> > + }
> > +
> > + drm_dbg_kms(&i915->drm, "Reducing link parameters from %dx%d to %dx%d\n",
> > + crtc_state->port_clock, crtc_state->lane_count,
> > + new_link_rate, new_lane_count);
> > +
> > + intel_dp->link_train.max_rate = new_link_rate;
> > + intel_dp->link_train.max_lane_count = new_lane_count;
> > +
> > return 0;
> > }
> >
> > @@ -1178,9 +1200,7 @@ static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp,
> > lt_dbg(intel_dp, DP_PHY_DPRX,
> > "Link Training failed with HOBL active, not enabling it from now on\n");
> > intel_dp->hobl_failed = true;
> > - } else if (intel_dp_get_link_train_fallback_values(intel_dp,
> > - crtc_state->port_clock,
> > - crtc_state->lane_count)) {
> > + } else if (intel_dp_get_link_train_fallback_values(intel_dp, crtc_state)) {
> > return;
> > }
>
> --
> Jani Nikula, Intel
More information about the Intel-gfx
mailing list