[Intel-gfx] [PATCH 5/5] drm/i915: Link Rate fallback on Link training failure

Jim Bride jim.bride at linux.intel.com
Tue Oct 25 18:00:19 UTC 2016


On Tue, Oct 25, 2016 at 03:17:47PM +0300, Jani Nikula wrote:
> On Sat, 22 Oct 2016, Manasi Navare <manasi.d.navare at intel.com> wrote:
> > If link training at the link rate optimal for a particular
> > mode fails during the modeset's atomic commit phase, we let
> > the modeset complete and then retry. We save the link rate at
> > which link training failed and use a lower link rate to prune
> > the modes. The modeset is then redone for the current mode at
> > the lower link rate, or, if the current mode gets pruned by the
> > lower link rate constraints, a hotplug uevent is sent so that
> > userspace can handle it.
> >
> > This is also required to pass DP CTS tests 4.3.1.3, 4.3.1.4,
> > 4.3.1.6.
> >
> > Cc: Jani Nikula <jani.nikula at linux.intel.com>
> > Cc: Daniel Vetter <daniel.vetter at intel.com>
> > Cc: Ville Syrjala <ville.syrjala at linux.intel.com>
> > Signed-off-by: Manasi Navare <manasi.d.navare at intel.com>
> > ---
> >  drivers/gpu/drm/i915/intel_ddi.c              | 15 +++++-
> >  drivers/gpu/drm/i915/intel_dp.c               | 69 ++++++++++++++++++++++++++-
> >  drivers/gpu/drm/i915/intel_dp_link_training.c | 12 +++--
> >  drivers/gpu/drm/i915/intel_drv.h              |  6 ++-
> >  4 files changed, 95 insertions(+), 7 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
> > index fb18d69..451433b 100644
> > --- a/drivers/gpu/drm/i915/intel_ddi.c
> > +++ b/drivers/gpu/drm/i915/intel_ddi.c
> > @@ -1712,6 +1712,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
> >  	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
> >  	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
> >  	enum port port = intel_ddi_get_encoder_port(encoder);
> > +	struct intel_connector *intel_connector = intel_dp->attached_connector;
> > +	struct drm_connector *connector = &intel_connector->base;
> >  
> >  	intel_dp_set_link_params(intel_dp, link_rate, lane_count,
> >  				 link_mst);
> > @@ -1722,7 +1724,18 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
> >  	intel_prepare_dp_ddi_buffers(encoder);
> >  	intel_ddi_init_dp_buf_reg(encoder);
> >  	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
> > -	intel_dp_start_link_train(intel_dp);
> > +	if (!intel_dp_start_link_train(intel_dp)) {
> > +		DRM_ERROR("Link Training failed at link rate = %d, lane count = %d\n",
> > +			  link_rate, lane_count);
> > +		intel_dp->link_train_failed = true;
> > +		intel_dp->link_train_failed_link_rate = link_rate;
> > +		intel_dp->link_train_failed_lane_count = lane_count;
> 
> I think eventually you'll need to store a list (array) of failing link
> rate, lane count pairs, not just the last that failed. Now you restrict
> the link config computation to only reducing the link rate. But
> currently (for whatever reason, it's flip-flopped too many times) we
> start with wide & slow, meaning that in many cases we've already
> exhausted the option to go slower. If optimal fails, maybe we need to
> try narrow & fast instead.

The DP spec specifically calls out that lane count shouldn't be reduced
until all speeds with the current lane configuration fail.  Even if
we start at an "optimal" configuration, I believe we still need to
follow the reduction pattern that the spec calls out.
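
For reference, a rough sketch of that reduction order (purely
illustrative, not the actual i915 code; table contents and helper
names are made up) could look something like this:

#include <stdbool.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Link rates in kHz, highest first; lane counts, widest first. */
static const int rates[] = { 540000, 270000, 162000 };
static const int lane_counts[] = { 4, 2, 1 };

/*
 * Pick the next (link rate, lane count) pair to try after a
 * training failure: step down through the link rates at the
 * current lane count first, and only once the lowest rate has
 * failed drop to the next lower lane count and start over at
 * the highest rate.  Returns false when nothing is left to try.
 */
static bool next_link_config(int failed_rate, int failed_lanes,
			     int *new_rate, int *new_lanes)
{
	size_t i, j;

	for (i = 0; i < ARRAY_SIZE(lane_counts); i++)
		if (lane_counts[i] == failed_lanes)
			break;
	for (j = 0; j < ARRAY_SIZE(rates); j++)
		if (rates[j] == failed_rate)
			break;

	if (j + 1 < ARRAY_SIZE(rates)) {
		/* Lower rate, same lane count. */
		*new_rate = rates[j + 1];
		*new_lanes = failed_lanes;
		return true;
	}
	if (i + 1 < ARRAY_SIZE(lane_counts)) {
		/* Rates exhausted: drop lane count, reset the rate. */
		*new_rate = rates[0];
		*new_lanes = lane_counts[i + 1];
		return true;
	}
	return false;
}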

Jim


> 
> BR,
> Jani.
> 
> 
> > +		/* Schedule a Hotplug Uevent to userspace to start modeset */
> > +		schedule_work(&connector->i915_modeset_retry_work);
> > +	} else {
> > +		intel_dp->link_train_failed = false;
> > +	}
> > +
> >  	if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
> >  		intel_dp_stop_link_train(intel_dp);
> >  }
> > diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
> > index c192e18..5d5f4a7 100644
> > --- a/drivers/gpu/drm/i915/intel_dp.c
> > +++ b/drivers/gpu/drm/i915/intel_dp.c
> > @@ -313,6 +313,7 @@ static int intel_dp_link_rate_index(struct intel_dp *intel_dp,
> >  	int target_clock = mode->clock;
> >  	int max_rate, mode_rate, max_lanes, max_link_clock;
> >  	int max_dotclk;
> > +	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
> >  
> >  	max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);
> >  
> > @@ -326,8 +327,27 @@ static int intel_dp_link_rate_index(struct intel_dp *intel_dp,
> >  		target_clock = fixed_mode->clock;
> >  	}
> >  
> > -	max_link_clock = intel_dp_max_link_rate(intel_dp);
> > -	max_lanes = intel_dp_max_lane_count(intel_dp);
> > +	/* Prune the modes based on the link rate that failed */
> > +	if (intel_dp->link_train_failed_link_rate) {
> > +		intel_dp->link_rate_index = intel_dp_link_rate_index(intel_dp,
> > +								     common_rates,
> > +								     intel_dp->link_train_failed_link_rate);
> > +		if (intel_dp->link_rate_index > 0) {
> > +			max_link_clock = common_rates[intel_dp->link_rate_index - 1];
> > +			max_lanes = intel_dp_max_lane_count(intel_dp);
> > +		} else {
> > +			/* Here we can lower the lane count, but that will be
> > +			 * added for DP Spec 1.3
> > +			 */
> > +			DRM_ERROR("No Valid Mode Supported for this Link\n");
> > +			intel_dp->link_train_failed_link_rate = 0;
> > +			intel_dp->link_rate_index = -1;
> > +			intel_dp->link_train_failed = false;
> > +		}
> > +	} else {
> > +		max_link_clock = intel_dp_max_link_rate(intel_dp);
> > +		max_lanes = intel_dp_max_lane_count(intel_dp);
> > +	}
> >  
> >  	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
> >  	mode_rate = intel_dp_link_required(target_clock, 18);
> > @@ -1619,6 +1639,14 @@ static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
> >  	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
> >  		return false;
> >  
> > +	/* Fall back to lower link rate in case of failure in previous modeset */
> > +	if (intel_dp->link_train_failed_link_rate) {
> > +		min_lane_count = max_lane_count;
> > +		min_clock = max_clock = intel_dp->link_rate_index - 1;
> > +		intel_dp->link_train_failed_link_rate = 0;
> > +		intel_dp->link_rate_index = -1;
> > +	}
> > +
> >  	DRM_DEBUG_KMS("DP link computation with max lane count %i "
> >  		      "max bw %d pixel clock %iKHz\n",
> >  		      max_lane_count, common_rates[max_clock],
> > @@ -5689,6 +5717,39 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
> >  	return false;
> >  }
> >  
> > +static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
> > +{
> > +	struct drm_connector *connector;
> > +	struct intel_dp *intel_dp;
> > +	struct drm_display_mode *mode;
> > +	bool verbose_prune = true;
> > +	bool reprobe = false;
> > +
> > +	connector = container_of(work, typeof(*connector),
> > +				 i915_modeset_retry_work);
> > +	intel_dp = intel_attached_dp(connector);
> > +
> > +	/* Grab the locks before changing connector property */
> > +	mutex_lock(&connector->dev->mode_config.mutex);
> > +	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
> > +		      connector->name);
> > +	list_for_each_entry(mode, &connector->modes, head) {
> > +		mode->status = intel_dp_mode_valid(connector,
> > +						   mode);
> > +		if (mode->status != MODE_OK)
> > +			reprobe = true;
> > +	}
> > +	drm_mode_prune_invalid(connector->dev, &connector->modes,
> > +			       verbose_prune);
> > +	mutex_unlock(&connector->dev->mode_config.mutex);
> > +	if (reprobe) {
> > +		/* Send Hotplug uevent so userspace can reprobe */
> > +		drm_kms_helper_hotplug_event(connector->dev);
> > +	}
> > +	if (intel_dp->link_train_failed)
> > +		drm_atomic_helper_connector_modeset(connector);
> > +}
> > +
> >  bool
> >  intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
> >  			struct intel_connector *intel_connector)
> > @@ -5701,6 +5762,10 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
> >  	enum port port = intel_dig_port->port;
> >  	int type;
> >  
> > +	/* Initialize the work for modeset in case of link train failure */
> > +	INIT_WORK(&connector->i915_modeset_retry_work,
> > +		  intel_dp_modeset_retry_work_fn);
> > +
> >  	if (WARN(intel_dig_port->max_lanes < 1,
> >  		 "Not enough lanes (%d) for DP on port %c\n",
> >  		 intel_dig_port->max_lanes, port_name(port)))
> > diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
> > index 0048b52..10f81ab 100644
> > --- a/drivers/gpu/drm/i915/intel_dp_link_training.c
> > +++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
> > @@ -310,9 +310,15 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp)
> >  				DP_TRAINING_PATTERN_DISABLE);
> >  }
> >  
> > -void
> > +bool
> >  intel_dp_start_link_train(struct intel_dp *intel_dp)
> >  {
> > -	intel_dp_link_training_clock_recovery(intel_dp);
> > -	intel_dp_link_training_channel_equalization(intel_dp);
> > +	bool ret;
> > +
> > +	if (intel_dp_link_training_clock_recovery(intel_dp)) {
> > +		ret = intel_dp_link_training_channel_equalization(intel_dp);
> > +		if (ret)
> > +			return true;
> > +	}
> > +	return false;
> >  }
> > diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
> > index 4e90b07..d3fcffc 100644
> > --- a/drivers/gpu/drm/i915/intel_drv.h
> > +++ b/drivers/gpu/drm/i915/intel_drv.h
> > @@ -890,6 +890,10 @@ struct intel_dp {
> >  	uint32_t DP;
> >  	int link_rate;
> >  	uint8_t lane_count;
> > +	int link_train_failed_link_rate;
> > +	uint8_t link_train_failed_lane_count;
> > +	int link_rate_index;
> > +	bool link_train_failed;
> >  	uint8_t sink_count;
> >  	bool link_mst;
> >  	bool has_audio;
> > @@ -1394,7 +1398,7 @@ bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
> >  void intel_dp_set_link_params(struct intel_dp *intel_dp,
> >  			      int link_rate, uint8_t lane_count,
> >  			      bool link_mst);
> > -void intel_dp_start_link_train(struct intel_dp *intel_dp);
> > +bool intel_dp_start_link_train(struct intel_dp *intel_dp);
> >  void intel_dp_stop_link_train(struct intel_dp *intel_dp);
> >  void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
> >  void intel_dp_encoder_reset(struct drm_encoder *encoder);
> 
> -- 
> Jani Nikula, Intel Open Source Technology Center
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx at lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx

