[Intel-gfx] [PATCHv5 5/5] drm/i915/dp: Enable Upfront link training for typeC DP support on BXT

Ander Conselvan De Oliveira conselvan2 at gmail.com
Tue May 17 11:28:05 UTC 2016


On Mon, 2016-05-16 at 11:56 +0530, Durgadoss R wrote:
> To support USB type-C alternate DP mode, the display driver needs to
> know the number of lanes required by the DP panel as well as the
> number of lanes that can be supported by the type-C cable. Sometimes
> the type-C cable may limit the bandwidth even if the panel can support
> more lanes. To address these scenarios, the display driver starts
> link training with the maximum lane count and, if that fails, falls
> back to x2 lanes, repeating this procedure for all bandwidth/lane
> configurations.
> 
> * Since link training is done before modeset, only the port
>   (and not the pipe/planes) and its associated PLLs are enabled.
> * On DP hotplug: directly start link training on the DP encoder.
> * On connected-boot scenarios: when booted with an LFP and a DP,
>   the BIOS typically brings up the DP. In these cases, we disable
>   the crtc, do upfront link training, and then bring the crtc back up.
> * All local changes made for upfront link training are reset
>   to their previous values once it is done, so that the
>   subsequent modeset is not aware of these changes.
> 
> Changes since v4:
> * Removed usage of crtc_state in upfront link training;
>   Hence no need to find free crtc to do upfront now.
> * Re-enable crtc if it was disabled for upfront.
> * Use separate variables to track max lane count
>   and link rate found by upfront, without modifying
>   the original DPCD read from panel.
> Changes since v3:
> * Fixed a return value on BXT check
> * Reworked on top of bxt_ddi_pll_select split from Ander
> * Renamed from ddi_upfront to bxt_upfront since the
>   upfront logic includes BXT specific functions for now.
> Changes since v2:
> * Rebased on top of latest dpll_mgr.c code and
>   latest HPD related clean ups.
> * Corrected return values from upfront (Ander)
> * Corrected atomic locking for upfront in intel_dp.c (Ville)
> Changes since v1:
> * All pll related functions inside ddi.c
> 
> Signed-off-by: Durgadoss R <durgadoss.r at intel.com>
> ---
>  drivers/gpu/drm/i915/intel_ddi.c |  58 +++++++++++++
>  drivers/gpu/drm/i915/intel_dp.c  | 183 +++++++++++++++++++++++++++++++++++++--
>  drivers/gpu/drm/i915/intel_drv.h |  12 +++
>  3 files changed, 247 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
> index 7e6331a..3ccd1c7 100644
> --- a/drivers/gpu/drm/i915/intel_ddi.c
> +++ b/drivers/gpu/drm/i915/intel_ddi.c
> @@ -2330,6 +2330,64 @@ intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
>  	return connector;
>  }
>  
> +int intel_bxt_upfront_link_train(struct intel_dp *intel_dp,
> +				int clock, uint8_t lane_count)
> +{
> +	struct intel_connector *connector = intel_dp->attached_connector;
> +	struct intel_encoder *encoder = connector->encoder;
> +	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
> +	struct intel_shared_dpll *pll;
> +	struct intel_shared_dpll_config tmp_pll_config;
> +	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
> +	enum intel_dpll_id dpll_id = (enum intel_dpll_id)dig_port->port;
> +
> +	mutex_lock(&dev_priv->dpll_lock);
> +	/*
> +	 * FIXME: Works only for BXT.
> +	 * Select the required PLL. This works for platforms where
> +	 * there is no shared DPLL.
> +	 */
> +	pll = &dev_priv->shared_dplls[dpll_id];
> +	if (WARN_ON(pll->active_mask)) {
> +		DRM_ERROR("Shared DPLL already in use. active_mask:%x\n", pll->active_mask);
> +		mutex_unlock(&dev_priv->dpll_lock);
> +		return -EINVAL;
> +	}
> +
> +	tmp_pll_config = pll->config;
> +
> +	DRM_DEBUG_KMS("Upfront link train start : link_clock:%d lanes:%d\n",
> +							clock, lane_count);
> +	do {
> +		if (!bxt_ddi_dp_set_dpll_hw_state(clock, &pll->config.hw_state)) {
> +			DRM_ERROR("Could not setup DPLL\n");
> +			goto exit_pll;
> +		}
> +
> +		/* Enable PLL followed by port */
> +		pll->funcs.enable(dev_priv, pll);
> +		intel_ddi_pre_enable_dp(encoder, clock, lane_count, pll);
> +
> +		if (intel_dp->train_set_valid)
> +			intel_dp_set_upfront_params(intel_dp, clock, lane_count);
> +
> +		/* Disable port followed by PLL for next retry/clean up */
> +		intel_ddi_post_disable(encoder);
> +		pll->funcs.disable(dev_priv, pll);
> +
> +	} while (!intel_dp->train_set_valid &&
> +		intel_dp_get_link_retry_params(intel_dp, &clock, &lane_count));

What I had in mind is that this loop would still be in the generic DP code.
Something like the following in intel_dp_upfront_link_train():

for (done = false, lane_count = 4; lane_count >= 1 && !done; lane_count >>= 1) {

	/* maybe use intel_dp_common_rates() here */
	for (link_rate = HBR2; link_rate >= RBR && !done; link_rate ... ) {
		done = platform_specific_link_enable(intel_dp,
						     link_rate, lane_count);

		if (done) {
			intel_dp->max_link_bw_upfront = ...;
			intel_dp->max_lane_count_upfront = ...;
		}
	}
}

The one inconvenience is that dev_priv->dpll_lock will have to be locked in
intel_dp_upfront_link_train() instead of down here. But the end result will be
pretty much the same.
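
I.e. roughly something like this (just a sketch, not tested; it assumes the
platform hook no longer takes the lock itself, and the loop body is the
fallback loop sketched above):

static int intel_dp_upfront_link_train(struct intel_dp *intel_dp)
{
	...
	mutex_lock(&dev_priv->dpll_lock);

	/* the lane_count/link_rate fallback loop from above */

	mutex_unlock(&dev_priv->dpll_lock);
	...
}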



> +
> +	DRM_DEBUG_KMS("Upfront link train %s: link_clock:%d lanes:%d\n",
> +	intel_dp->train_set_valid ? "Passed" : "Failed", clock, lane_count);
> +
> +exit_pll:
> +	pll->config = tmp_pll_config;
> +	mutex_unlock(&dev_priv->dpll_lock);
> +
> +	return intel_dp->train_set_valid ? 0 : -1;

Do you really intend to return -EPERM in the failure case? Maybe the return
value should just be a bool.
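
I.e. something like the following (sketch only; the prototype in intel_drv.h
and the status handling in the caller would have to change accordingly):

bool intel_bxt_upfront_link_train(struct intel_dp *intel_dp,
				  int clock, uint8_t lane_count)
{
	...
exit_pll:
	pll->config = tmp_pll_config;
	mutex_unlock(&dev_priv->dpll_lock);

	return intel_dp->train_set_valid;
}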

Ander

> +}
> +
>  void intel_ddi_init(struct drm_device *dev, enum port port)
>  {
>  	struct drm_i915_private *dev_priv = dev->dev_private;
> diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
> index 95ba5aa..a52c989 100644
> --- a/drivers/gpu/drm/i915/intel_dp.c
> +++ b/drivers/gpu/drm/i915/intel_dp.c
> @@ -147,18 +147,28 @@ intel_dp_max_link_bw(struct intel_dp  *intel_dp)
>  		max_link_bw = DP_LINK_BW_1_62;
>  		break;
>  	}
> -	return max_link_bw;
> +	/*
> +	 * Also limit the max link bw w.r.t. the maximum value found
> +	 * during upfront link training.
> +	 */
> +	return min(max_link_bw, intel_dp->max_link_bw_upfront);
>  }
>  
>  static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
>  {
>  	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
> -	u8 source_max, sink_max;
> +	u8 temp, source_max, sink_max;
>  
>  	source_max = intel_dig_port->max_lanes;
>  	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
>  
> -	return min(source_max, sink_max);
> +	temp = min(source_max, sink_max);
> +
> +	/*
> +	 * Also limit the max lane count w.r.t. the maximum value found
> +	 * during upfront link training.
> +	 */
> +	return min(temp, intel_dp->max_lanes_upfront);
>  }
>  
>  /*
> @@ -1590,6 +1600,49 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
>  	intel_dp->lane_count = lane_count;
>  }
>  
> +static void intel_dp_init_upfront_params(struct intel_dp *intel_dp)
> +{
> +	if (WARN_ON(intel_dp->upfront_done))
> +		return;
> +
> +	intel_dp->max_link_bw_upfront = intel_dp->dpcd[DP_MAX_LINK_RATE];
> +	intel_dp->max_lanes_upfront = drm_dp_max_lane_count(intel_dp->dpcd);
> +}
> +
> +void intel_dp_set_upfront_params(struct intel_dp *intel_dp,
> +				int link_rate, uint8_t lane_count)
> +{
> +	intel_dp->max_link_bw_upfront = drm_dp_link_rate_to_bw_code(link_rate);
> +	intel_dp->max_lanes_upfront = lane_count;
> +}
> +
> +bool intel_dp_get_link_retry_params(struct intel_dp *intel_dp,
> +			int *link_rate, uint8_t *lane_count)
> +{
> +	uint8_t link_bw = drm_dp_link_rate_to_bw_code(*link_rate);
> +
> +	/*
> +	 * As per the DP 1.3 spec, retry all link rates for a particular
> +	 * lane count before reducing the number of lanes.
> +	 */
> +	if (link_bw == DP_LINK_BW_5_4) {
> +		*link_rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_2_7);
> +	} else if (link_bw == DP_LINK_BW_2_7) {
> +		*link_rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_1_62);
> +	} else if (*lane_count == 4) {
> +		*lane_count = 2;
> +		*link_rate = intel_dp_max_link_rate(intel_dp);
> +	} else if (*lane_count == 2) {
> +		*lane_count = 1;
> +		*link_rate = intel_dp_max_link_rate(intel_dp);
> +	} else {
> +		/* Tried all combinations, so exit */
> +		return false;
> +	}
> +
> +	return true;
> +}
> +
>  static void intel_dp_prepare(struct intel_encoder *encoder)
>  {
>  	struct drm_device *dev = encoder->base.dev;
> @@ -3424,6 +3477,16 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
>  		intel_dp->num_sink_rates = i;
>  	}
>  
> +	/*
> +	 * The link_bw and lane count values are initialized to the maximum
> +	 * possible value for all encoder types, i.e. DP, eDP and DP-MST, so
> +	 * that the intel_dp_max_{link_bw/lane_count} APIs do not have to
> +	 * worry about encoder types. They further cap the max w.r.t. the
> +	 * upfront values as well.
> +	 */
> +	if (!intel_dp->upfront_done)
> +		intel_dp_init_upfront_params(intel_dp);
> +
>  	intel_dp_print_rates(intel_dp);
>  
>  	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
> @@ -4140,6 +4203,99 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
>  	intel_dp->has_audio = false;
>  }
>  
> +static int intel_dp_upfront_crtc_disable(struct intel_crtc *crtc,
> +				struct drm_modeset_acquire_ctx *ctx,
> +				bool enable)
> +{
> +	int ret;
> +	struct drm_atomic_state *state;
> +	struct intel_crtc_state *crtc_state;
> +	struct drm_device *dev = crtc->base.dev;
> +	enum pipe pipe = crtc->pipe;
> +
> +	state = drm_atomic_state_alloc(dev);
> +	if (!state)
> +		return -ENOMEM;
> +
> +	state->acquire_ctx = ctx;
> +
> +	crtc_state = intel_atomic_get_crtc_state(state, crtc);
> +	if (IS_ERR(crtc_state)) {
> +		ret = PTR_ERR(crtc_state);
> +		drm_atomic_state_free(state);
> +		return ret;
> +	}
> +
> +	DRM_DEBUG_KMS("%sabling crtc %c %s upfront link train\n",
> +			enable ? "En" : "Dis",
> +			pipe_name(pipe),
> +			enable ? "after" : "before");
> +
> +	crtc_state->base.active = enable;
> +	ret = drm_atomic_commit(state);
> +	if (ret)
> +		drm_atomic_state_free(state);
> +
> +	return ret;
> +}
> +
> +static int intel_dp_upfront_link_train(struct intel_dp *intel_dp)
> +{
> +	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
> +	struct intel_encoder *intel_encoder = &intel_dig_port->base;
> +	struct drm_device *dev = intel_encoder->base.dev;
> +	struct drm_mode_config *config = &dev->mode_config;
> +	struct drm_modeset_acquire_ctx ctx;
> +	struct intel_crtc *intel_crtc;
> +	struct drm_crtc *crtc = NULL;
> +	int ret, status = 0;
> +	uint8_t max_lanes = intel_dp->max_lanes_upfront;
> +	int max_clock = intel_dp_max_link_rate(intel_dp);
> +
> +	if (!IS_BROXTON(dev))
> +		return 0;
> +
> +	drm_modeset_acquire_init(&ctx, 0);
> +retry:
> +	ret = drm_modeset_lock(&config->connection_mutex, &ctx);
> +	if (ret)
> +		goto exit_fail;
> +
> +	if (intel_encoder->base.crtc) {
> +		crtc = intel_encoder->base.crtc;
> +
> +		ret = drm_modeset_lock(&crtc->mutex, &ctx);
> +		if (ret)
> +			goto exit_fail;
> +
> +		ret = drm_modeset_lock(&crtc->primary->mutex, &ctx);
> +		if (ret)
> +			goto exit_fail;
> +
> +		intel_crtc = to_intel_crtc(crtc);
> +		ret = intel_dp_upfront_crtc_disable(intel_crtc, &ctx, false);
> +		if (ret)
> +			goto exit_fail;
> +	}
> +
> +	if (IS_BROXTON(dev))
> +		status = intel_bxt_upfront_link_train(intel_dp,
> +						max_clock, max_lanes);
> +		/* Other platforms upfront link train call goes here..*/
> +
> +	if (crtc)
> +		ret = intel_dp_upfront_crtc_disable(intel_crtc, &ctx, true);
> +
> +exit_fail:
> +	if (ret == -EDEADLK) {
> +		drm_modeset_backoff(&ctx);
> +		goto retry;
> +	}
> +	drm_modeset_drop_locks(&ctx);
> +	drm_modeset_acquire_fini(&ctx);
> +	return status;
> +}
> +
>  static void
>  intel_dp_long_pulse(struct intel_connector *intel_connector)
>  {
> @@ -4150,7 +4306,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
>  	struct drm_device *dev = connector->dev;
>  	enum drm_connector_status status;
>  	enum intel_display_power_domain power_domain;
> -	bool ret;
> +	bool ret, do_upfront_link_train;
>  	u8 sink_irq_vector;
>  
>  	power_domain = intel_display_port_aux_power_domain(intel_encoder);
> @@ -4235,10 +4391,25 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
>  			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
>  	}
>  
> +	/* Do not do upfront link training if it is a compliance request */
> +	do_upfront_link_train = !intel_dp->upfront_done &&
> +		intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT &&
> +		intel_dp->compliance_test_type != DP_TEST_LINK_TRAINING;
> +
> +	if (do_upfront_link_train) {
> +		if (intel_dp_upfront_link_train(intel_dp)) {
> +			status = connector_status_disconnected;
> +			intel_dp->upfront_done = false;
> +		} else {
> +			intel_dp->upfront_done = true;
> +		}
> +	}
> +
>  out:
> -	if ((status != connector_status_connected) &&
> -	    (intel_dp->is_mst == false))
> +	if (status != connector_status_connected && !intel_dp->is_mst) {
>  		intel_dp_unset_edid(intel_dp);
> +		intel_dp->upfront_done = false;
> +	}
>  
>  	intel_display_power_put(to_i915(dev), power_domain);
>  	return;
> diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
> index 3315d57..7930c60 100644
> --- a/drivers/gpu/drm/i915/intel_drv.h
> +++ b/drivers/gpu/drm/i915/intel_drv.h
> @@ -812,6 +812,12 @@ struct intel_dp {
>  	enum hdmi_force_audio force_audio;
>  	bool limited_color_range;
>  	bool color_range_auto;
> +
> +	/* Upfront link train parameters */
> +	int max_link_bw_upfront;
> +	uint8_t max_lanes_upfront;
> +	bool upfront_done;
> +
>  	uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
>  	uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
>  	uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
> @@ -1086,6 +1092,8 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
>  			 struct intel_crtc_state *pipe_config);
>  void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
>  uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
> +int intel_bxt_upfront_link_train(struct intel_dp *intel_dp,
> +				int clock, uint8_t lane_count);
>  
>  /* intel_frontbuffer.c */
>  void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
> @@ -1292,6 +1300,10 @@ bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
>  			     struct intel_connector *intel_connector);
>  void intel_dp_set_link_params(struct intel_dp *intel_dp,
>  				int link_rate, uint8_t lane_count);
> +void intel_dp_set_upfront_params(struct intel_dp *intel_dp,
> +				int link_rate, uint8_t lane_count);
> +bool intel_dp_get_link_retry_params(struct intel_dp *intel_dp,
> +				int *link_rate, uint8_t *lane_count);
>  void intel_dp_start_link_train(struct intel_dp *intel_dp);
>  void intel_dp_stop_link_train(struct intel_dp *intel_dp);
>  void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
