[Intel-gfx] [PATCHv6 5/5] drm/i915/dp: Enable Upfront link training for typeC DP support on BXT
Ville Syrjälä
ville.syrjala at linux.intel.com
Wed May 25 15:35:19 UTC 2016
On Fri, May 20, 2016 at 02:29:02PM +0530, Durgadoss R wrote:
> To support USB type C alternate DP mode, the display driver needs to
> know the number of lanes required by the DP panel as well as number
> of lanes that can be supported by the type-C cable. Sometimes, the
> type-C cable may limit the bandwidth even if Panel can support
> more lanes. To address these scenarios, the display driver will
> start link training with max lanes, and if that fails, the driver
> falls back to x2 lanes; and repeats this procedure for all
> bandwidth/lane configurations.
>
> * Since link training is done before modeset only the port
> (and not pipe/planes) and its associated PLLs are enabled.
> * On DP hotplug: Directly start link training on the DP encoder.
> * On Connected boot scenarios: When booted with an LFP and a DP,
> sometimes BIOS brings up DP. In these cases, we disable the
> crtc and then do upfront link training; and bring it back up.
> * All local changes made for upfront link training are reset
> to their previous values once it is done; so that the
> subsequent modeset is not aware of these changes.
>
> Changes since v5:
> * Moved retry logic in upfront to intel_dp.c so that it
> can be used for all platforms.
> Changes since v4:
> * Removed usage of crtc_state in upfront link training;
> Hence no need to find free crtc to do upfront now.
> * Re-enable crtc if it was disabled for upfront.
> * Use separate variables to track max lane count
> and link rate found by upfront, without modifying
> the original DPCD read from panel.
> Changes since v3:
> * Fixed a return value on BXT check
> * Reworked on top of bxt_ddi_pll_select split from Ander
> * Renamed from ddi_upfront to bxt_upfront since the
> upfront logic includes BXT specific functions for now.
> Changes since v2:
> * Rebased on top of latest dpll_mgr.c code and
> latest HPD related clean ups.
> * Corrected return values from upfront (Ander)
> * Corrected atomic locking for upfront in intel_dp.c (Ville)
> Changes since v1:
> * all pll related functions inside ddi.c
>
> Signed-off-by: Durgadoss R <durgadoss.r at intel.com>
> ---
> drivers/gpu/drm/i915/intel_ddi.c | 45 ++++++++++
> drivers/gpu/drm/i915/intel_dp.c | 179 +++++++++++++++++++++++++++++++++++++--
> drivers/gpu/drm/i915/intel_drv.h | 8 ++
> 3 files changed, 226 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
> index 7e6331a..8d224bf 100644
> --- a/drivers/gpu/drm/i915/intel_ddi.c
> +++ b/drivers/gpu/drm/i915/intel_ddi.c
> @@ -2330,6 +2330,51 @@ intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
> return connector;
> }
>
> +bool intel_bxt_upfront_link_train(struct intel_dp *intel_dp,
> + int clock, uint8_t lane_count)
> +{
> + struct intel_connector *connector = intel_dp->attached_connector;
> + struct intel_encoder *encoder = connector->encoder;
> + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
> + struct intel_shared_dpll *pll;
> + struct intel_shared_dpll_config tmp_pll_config;
> + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
> + enum intel_dpll_id dpll_id = (enum intel_dpll_id)dig_port->port;
> +
> + /*
> + * FIXME: Works only for BXT.
> + * Select the required PLL. This works for platforms where
> + * there is no shared DPLL.
> + */
> + pll = &dev_priv->shared_dplls[dpll_id];
> + if (WARN_ON(pll->active_mask)) {
> + DRM_ERROR("Shared DPLL already in use. active_mask:%x\n", pll->active_mask);
> + return false;
> + }
> +
> + tmp_pll_config = pll->config;
> +
> + if (!bxt_ddi_dp_set_dpll_hw_state(clock, &pll->config.hw_state)) {
> + DRM_ERROR("Could not setup DPLL\n");
> + pll->config = tmp_pll_config;
> + return false;
> + }
> +
> + /* Enable PLL followed by port */
> + pll->funcs.enable(dev_priv, pll);
> + intel_ddi_pre_enable_dp(encoder, clock, lane_count, pll);
> +
> + DRM_DEBUG_KMS("Upfront link train %s: link_clock:%d lanes:%d\n",
> + intel_dp->train_set_valid ? "Passed" : "Failed", clock, lane_count);
> +
> + /* Disable port followed by PLL for next retry/clean up */
> + intel_ddi_post_disable(encoder);
> + pll->funcs.disable(dev_priv, pll);
> +
> + pll->config = tmp_pll_config;
> + return intel_dp->train_set_valid;
> +}
> +
> void intel_ddi_init(struct drm_device *dev, enum port port)
> {
> struct drm_i915_private *dev_priv = dev->dev_private;
> diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
> index 95ba5aa..6ecaa30 100644
> --- a/drivers/gpu/drm/i915/intel_dp.c
> +++ b/drivers/gpu/drm/i915/intel_dp.c
> @@ -147,18 +147,28 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp)
> max_link_bw = DP_LINK_BW_1_62;
> break;
> }
> - return max_link_bw;
> + /*
> + * Limit max link bw w.r.t to the max value found
> + * using Upfront link training also.
> + */
> + return min(max_link_bw, intel_dp->max_link_bw_upfront);
This doesn't feel like the right place for this. I've tried to move
us away from the link_bw usage to using proper rate numbers.
This should probably be handled somewhere in intel_dp_common_rates()
or perhaps just in intel_dp_max_link_rate().
> }
>
> static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
> {
> struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
> - u8 source_max, sink_max;
> + u8 temp, source_max, sink_max;
>
> source_max = intel_dig_port->max_lanes;
> sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
>
> - return min(source_max, sink_max);
> + temp = min(source_max, sink_max);
> +
> + /*
> + * Limit max lanes w.r.t to the max value found
> + * using Upfront link training also.
> + */
> + return min(temp, intel_dp->max_lanes_upfront);
> }
>
> /*
> @@ -1590,6 +1600,15 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
> intel_dp->lane_count = lane_count;
> }
>
> +static void intel_dp_init_upfront_params(struct intel_dp *intel_dp)
> +{
> + if (WARN_ON(intel_dp->upfront_done))
> + return;
> +
> + intel_dp->max_link_bw_upfront = intel_dp->dpcd[DP_MAX_LINK_RATE];
> + intel_dp->max_lanes_upfront = drm_dp_max_lane_count(intel_dp->dpcd);
> +}
> +
> static void intel_dp_prepare(struct intel_encoder *encoder)
> {
> struct drm_device *dev = encoder->base.dev;
> @@ -3424,6 +3443,16 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
> intel_dp->num_sink_rates = i;
> }
>
> + /*
> + * The link_bw and lane count values are initialized to MAX possible
> + * value for all encoder types i.e DP, eDP, DP-MST, so that the
> + * intel_dp_max_{link_bw/lane_count} APIs do not have to worry
> + * about encoder types. They further cap the max w.r.t the upfront
> + * values also.
> + */
> + if (!intel_dp->upfront_done)
> + intel_dp_init_upfront_params(intel_dp);
With all the modeset locks involved there, I have a bad feeling this
ends up getting called from the hpd pulse work at the wrong time
perhaps leading to a deadlock.
> +
> intel_dp_print_rates(intel_dp);
>
> if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
> @@ -4140,6 +4169,132 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
> intel_dp->has_audio = false;
> }
>
> +static int intel_dp_upfront_crtc_disable(struct intel_crtc *crtc,
> + struct drm_modeset_acquire_ctx *ctx,
> + bool enable)
> +{
> + int ret;
> + struct drm_atomic_state *state;
> + struct intel_crtc_state *crtc_state;
> + struct drm_device *dev = crtc->base.dev;
> + enum pipe pipe = crtc->pipe;
> +
> + state = drm_atomic_state_alloc(dev);
> + if (!state)
> + return -ENOMEM;
> +
> + state->acquire_ctx = ctx;
> +
> + crtc_state = intel_atomic_get_crtc_state(state, crtc);
> + if (IS_ERR(crtc_state)) {
> + ret = PTR_ERR(crtc_state);
> + drm_atomic_state_free(state);
> + return ret;
> + }
> +
> + DRM_DEBUG_KMS("%sabling crtc %c %s upfront link train\n",
> + enable ? "En" : "Dis",
> + pipe_name(pipe),
> + enable ? "after" : "before");
> +
> + crtc_state->base.active = enable;
> + ret = drm_atomic_commit(state);
> + if (ret)
> + drm_atomic_state_free(state);
> +
> + return ret;
> +}
> +
> +static bool __intel_dp_upfront_link_train(struct intel_dp *intel_dp,
> + int clock, uint8_t lane_count)
> +{
> + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
> + struct drm_i915_private *dev_priv =
> + to_i915(intel_dig_port->base.base.dev);
> +
> + if (IS_BROXTON(dev_priv))
> + return intel_bxt_upfront_link_train(intel_dp, clock, lane_count);
> + /* Other platform calls go here */
> +
> + /* Return true for unsupported platforms */
> + return true;
> +}
> +static bool intel_dp_upfront_link_train(struct intel_dp *intel_dp)
> +{
> + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
> + struct intel_encoder *intel_encoder = &intel_dig_port->base;
> + struct drm_device *dev = intel_encoder->base.dev;
> + struct drm_i915_private *dev_priv = to_i915(dev);
> + struct drm_mode_config *config = &dev->mode_config;
> + struct drm_modeset_acquire_ctx ctx;
> + struct intel_crtc *intel_crtc;
> + struct drm_crtc *crtc = NULL;
> + int ret;
> + bool done = false;
> + uint8_t lane_count, max_lanes = intel_dp->max_lanes_upfront;
> + int common_rates[DP_MAX_SUPPORTED_RATES] = {};
> + int clock, common_len;
> +
> + common_len = intel_dp_common_rates(intel_dp, common_rates);
> + if (WARN_ON(common_len <= 0))
> + return 0;
> +
> + if (!IS_BROXTON(dev))
> + return 0;
> +
> + drm_modeset_acquire_init(&ctx, 0);
> +retry:
> + ret = drm_modeset_lock(&config->connection_mutex, &ctx);
> + if (ret)
> + goto exit_fail;
> +
> + if (intel_encoder->base.crtc) {
> + crtc = intel_encoder->base.crtc;
> +
> + ret = drm_modeset_lock(&crtc->mutex, &ctx);
> + if (ret)
> + goto exit_fail;
> +
> + ret = drm_modeset_lock(&crtc->primary->mutex, &ctx);
> + if (ret)
> + goto exit_fail;
> +
> + intel_crtc = to_intel_crtc(crtc);
> + ret = intel_dp_upfront_crtc_disable(intel_crtc, &ctx, false);
> + if (ret)
> + goto exit_fail;
Magically disabling stuff doesn't seem right. OTOH I don't have a good
idea how we'd do this if the user yanks the cable and plugs it back in
before userspace has had a chance to shut down the pipe(s). Postpone the
detection until it's all clear, or something? Anyone have good ideas for
that?
> + }
> +
> + mutex_lock(&dev_priv->dpll_lock);
> +
> + for (lane_count = max_lanes; lane_count >= 1 && !done; lane_count >>= 1) {
> + for (clock = common_len - 1; clock >= 0 && !done; clock--) {
> + done = __intel_dp_upfront_link_train(intel_dp,
> + common_rates[clock], lane_count);
> + if (done) {
> + intel_dp->max_lanes_upfront = lane_count;
> + intel_dp->max_link_bw_upfront =
> + drm_dp_link_rate_to_bw_code(common_rates[clock]);
> + break;
> + }
> + }
> + }
> +
> + mutex_unlock(&dev_priv->dpll_lock);
> +
> + if (crtc)
> + ret = intel_dp_upfront_crtc_disable(intel_crtc, &ctx, true);
> +
> +exit_fail:
> + if (ret == -EDEADLK) {
> + drm_modeset_backoff(&ctx);
> + goto retry;
> + }
> + drm_modeset_drop_locks(&ctx);
> + drm_modeset_acquire_fini(&ctx);
> + return done;
> +}
> +
> static void
> intel_dp_long_pulse(struct intel_connector *intel_connector)
> {
> @@ -4150,7 +4305,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
> struct drm_device *dev = connector->dev;
> enum drm_connector_status status;
> enum intel_display_power_domain power_domain;
> - bool ret;
> + bool ret, do_upfront_link_train;
> u8 sink_irq_vector;
>
> power_domain = intel_display_port_aux_power_domain(intel_encoder);
> @@ -4235,10 +4390,22 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
> DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
> }
>
> + /* Do not do upfront link train, if it is a compliance request */
> + do_upfront_link_train = !intel_dp->upfront_done &&
> + intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT &&
> + intel_dp->compliance_test_type != DP_TEST_LINK_TRAINING;
> +
> + if (do_upfront_link_train) {
> + intel_dp->upfront_done = intel_dp_upfront_link_train(intel_dp);
> + if (!intel_dp->upfront_done)
> + status = connector_status_disconnected;
> + }
> +
> out:
> - if ((status != connector_status_connected) &&
> - (intel_dp->is_mst == false))
> + if (status != connector_status_connected && !intel_dp->is_mst) {
> intel_dp_unset_edid(intel_dp);
> + intel_dp->upfront_done = false;
> + }
>
> intel_display_power_put(to_i915(dev), power_domain);
> return;
> diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
> index bcf570f..060ea9b 100644
> --- a/drivers/gpu/drm/i915/intel_drv.h
> +++ b/drivers/gpu/drm/i915/intel_drv.h
> @@ -832,6 +832,12 @@ struct intel_dp {
> enum hdmi_force_audio force_audio;
> bool limited_color_range;
> bool color_range_auto;
> +
> + /* Upfront link train parameters */
> + int max_link_bw_upfront;
> + uint8_t max_lanes_upfront;
> + bool upfront_done;
> +
> uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
> uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
> uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
> @@ -1113,6 +1119,8 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
> struct intel_crtc_state *pipe_config);
> void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
> uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
> +bool intel_bxt_upfront_link_train(struct intel_dp *intel_dp,
> + int clock, uint8_t lane_count);
>
> /* intel_frontbuffer.c */
> void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
> --
> 1.9.1
>
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx at lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
--
Ville Syrjälä
Intel OTC
More information about the Intel-gfx
mailing list