[Intel-gfx] [PATCH v8 07/19] drm/i915/dp: Compute DSC pipe config in atomic check

Manasi Navare manasi.d.navare at intel.com
Wed Nov 7 22:31:56 UTC 2018


On Tue, Nov 06, 2018 at 12:37:59PM -0800, Manasi Navare wrote:
> On Tue, Nov 06, 2018 at 04:42:56PM +0200, Ville Syrjälä wrote:
> > On Fri, Nov 02, 2018 at 07:09:03PM -0700, Manasi Navare wrote:
> > > On Fri, Nov 02, 2018 at 02:31:26PM -0700, Manasi Navare wrote:
> > > > DSC params like the enable, compressed bpp, slice count and
> > > > dsc_split are added to the intel_crtc_state. These parameters
> > > > are set based on the requested mode and available link parameters
> > > > during the pipe configuration in atomic check phase.
> > > > These values are then later used to populate the remaining DSC
> > > > and RC parameters before enbaling DSC in atomic commit.
> > > > 
> > > > v11:
> > > > * Const crtc_state, reject DSC on DP without FEC (Ville)
> > > > * Dont set dsc_split to false (Ville)
> > > > v10:
> > > > * Add a helper for dp_dsc support (Ville)
> > > > * Set pipe_config to max bpp, link params for DSC for now (Ville)
> > > > * Compute bpp - use dp dsc support helper (Ville)
> > > > v9:
> > > > * Rebase on top of drm-tip that now uses fast_narrow config
> > > > for edp (Manasi)
> > > > v8:
> > > > * Check for DSC bpc not 0 (manasi)
> > > > 
> > > > v7:
> > > > * Fix indentation in compute_m_n (Manasi)
> > > > 
> > > > v6 (From Gaurav):
> > > > * Remove function call of intel_dp_compute_dsc_params() and
> > > > invoke intel_dp_compute_dsc_params() in the patch where
> > > > it is defined to fix compilation warning (Gaurav)
> > > > 
> > > > v5:
> > > > Add drm_dsc_cfg in intel_crtc_state (Manasi)
> > > > 
> > > > v4:
> > > > * Rebase on refactoring of intel_dp_compute_config on tip (Manasi)
> > > > * Add a comment why we need to check PSR while enabling DSC (Gaurav)
> > > > 
> > > > v3:
> > > > * Check PPR > max_cdclock to use 2 VDSC instances (Ville)
> > > > 
> > > > v2:
> > > > * Add if-else for eDP/DP (Gaurav)
> > > > 
> > > > Cc: Jani Nikula <jani.nikula at linux.intel.com>
> > > > Cc: Ville Syrjala <ville.syrjala at linux.intel.com>
> > > > Cc: Anusha Srivatsa <anusha.srivatsa at intel.com>
> > > > Cc: Gaurav K Singh <gaurav.k.singh at intel.com>
> > > > Signed-off-by: Manasi Navare <manasi.d.navare at intel.com>
> > > > Reviewed-by: Anusha Srivatsa <anusha.srivatsa at intel.com>
> > > > Acked-by: Jani Nikula <jani.nikula at linux.intel.com>
> > > > ---
> > > >  drivers/gpu/drm/i915/intel_display.c |  20 +++-
> > > >  drivers/gpu/drm/i915/intel_display.h |   3 +-
> > > >  drivers/gpu/drm/i915/intel_dp.c      | 167 ++++++++++++++++++++++++---
> > > >  drivers/gpu/drm/i915/intel_dp_mst.c  |   2 +-
> > > >  4 files changed, 166 insertions(+), 26 deletions(-)
> > > > 
> > > > diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
> > > > index b219d5858160..477e53c37353 100644
> > > > --- a/drivers/gpu/drm/i915/intel_display.c
> > > > +++ b/drivers/gpu/drm/i915/intel_display.c
> > > > @@ -6442,7 +6442,7 @@ static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
> > > >  
> > > >  	pipe_config->fdi_lanes = lane;
> > > >  
> > > > -	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
> > > > +	intel_link_compute_m_n(pipe_config->pipe_bpp, 0, lane, fdi_dotclock,
> > > >  			       link_bw, &pipe_config->fdi_m_n, false);
> > > >  
> > > >  	ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
> > > > @@ -6679,17 +6679,25 @@ static void compute_m_n(unsigned int m, unsigned int n,
> > > >  }
> > > >  
> > > >  void
> > > > -intel_link_compute_m_n(int bits_per_pixel, int nlanes,
> > > > +intel_link_compute_m_n(int bits_per_pixel, uint16_t compressed_bpp,
> > > > +		       int nlanes,
> > > >  		       int pixel_clock, int link_clock,
> > > >  		       struct intel_link_m_n *m_n,
> > > >  		       bool constant_n)
> > > >  {
> > > >  	m_n->tu = 64;
> > > >  
> > > > -	compute_m_n(bits_per_pixel * pixel_clock,
> > > > -		    link_clock * nlanes * 8,
> > > > -		    &m_n->gmch_m, &m_n->gmch_n,
> > > > -		    constant_n);
> > > > +	/* For DSC, Data M/N calculation uses compressed BPP */
> > > > +	if (compressed_bpp)
> > > > +		compute_m_n(compressed_bpp * pixel_clock,
> > > > +			    link_clock * nlanes * 8,
> > > > +			    &m_n->gmch_m, &m_n->gmch_n,
> > > > +			    constant_n);
> > > > +	else
> > > > +		compute_m_n(bits_per_pixel * pixel_clock,
> > > > +			    link_clock * nlanes * 8,
> > > > +			    &m_n->gmch_m, &m_n->gmch_n,
> > > > +			    constant_n);
> > > >  
> > > >  	compute_m_n(pixel_clock, link_clock,
> > > >  		    &m_n->link_m, &m_n->link_n,
> > > > diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h
> > > > index 5d50decbcbb5..b0b23e1e9392 100644
> > > > --- a/drivers/gpu/drm/i915/intel_display.h
> > > > +++ b/drivers/gpu/drm/i915/intel_display.h
> > > > @@ -407,7 +407,8 @@ struct intel_link_m_n {
> > > >  	     (__i)++) \
> > > >  		for_each_if(plane)
> > > >  
> > > > -void intel_link_compute_m_n(int bpp, int nlanes,
> > > > +void intel_link_compute_m_n(int bpp, uint16_t compressed_bpp,
> > > > +			    int nlanes,
> > > >  			    int pixel_clock, int link_clock,
> > > >  			    struct intel_link_m_n *m_n,
> > > >  			    bool constant_n);
> > > > diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
> > > > index b39b4bda8e40..58326fc9d935 100644
> > > > --- a/drivers/gpu/drm/i915/intel_dp.c
> > > > +++ b/drivers/gpu/drm/i915/intel_dp.c
> > > > @@ -47,6 +47,8 @@
> > > >  
> > > >  /* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */
> > > >  #define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER	61440
> > > > +#define DP_DSC_MIN_SUPPORTED_BPC		8
> > > > +#define DP_DSC_MAX_SUPPORTED_BPC		10
> > > >  
> > > >  /* DP DSC throughput values used for slice count calculations KPixels/s */
> > > >  #define DP_DSC_PEAK_PIXEL_RATE			2720000
> > > > @@ -1840,6 +1842,29 @@ struct link_config_limits {
> > > >  	int min_bpp, max_bpp;
> > > >  };
> > > >  
> > > > +static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp,
> > > > +					 const struct intel_crtc_state *pipe_config)
> > > > +{
> > > > +	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
> > > > +
> > > > +	/* FIXME: FEC needed for external DP until then reject DSC on DP */
> > > > +	if (!intel_dp_is_edp(intel_dp))
> > > > +		return false;
> > > > +
> > > > +	return INTEL_GEN(dev_priv) >= 10 &&
> > > > +		pipe_config->cpu_transcoder != TRANSCODER_A;
> > > > +}
> > > > +
> > > > +static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
> > > > +				  const struct intel_crtc_state *pipe_config)
> > > > +{
> > > > +	if (!intel_dp_source_supports_dsc(intel_dp, pipe_config) ||
> > > > +	    !drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd))
> > > > +		return false;
> > > > +
> > > > +	return true;
> > > > +}
> > > > +
> > > >  static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
> > > >  				struct intel_crtc_state *pipe_config)
> > > >  {
> > > > @@ -1863,6 +1888,15 @@ static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
> > > >  		}
> > > >  	}
> > > >  
> > > > +	/* If DSC is supported, use the max value reported by panel */
> > > > +	if (intel_dp_supports_dsc(intel_dp, pipe_config)) {
> > > > +		bpc = min_t(u8,
> > > > +			    drm_dp_dsc_sink_max_color_depth(intel_dp->dsc_dpcd),
> > > > +			    DP_DSC_MAX_SUPPORTED_BPC);
> > > > +		if (bpc)
> > > > +			bpp = min(bpp, 3*bpc);

I am just leaving it at bpp = 3 * bpc here, since taking a min would bring it down to the VBT bpp,
which we don't want in case DSC is supported; that should override the VBT and EDID bpp.

Manasi

> > > 
> > > Ville, the problem with chosing the min between existing bpp and 3 * bpc (computed from DSC dpcd) is that EDID based bpc
> > > might be lower and in case DSC is supported, the bpc value should be based on the dsc color depth and that should
> > > override the value obtained from EDID.  This was confirmed from the VDSC programming documents obtained from
> > > HW architecture folks
> > > 
> > > So i think it should still be if DSC supported use the bpc directly from min_t(u8, drm_dp_dsc_sink_max_color_depth(intel_dp->dsc_dpcd),DP_DSC_MAX_SUPPORTED_BPC);
> > 
> > Maybe
> > 
> > bpc_min = something;
> > bpc_max = something_else;
> 
> So this bpc_max will come from intel_dp_compute_bpp() and there it will just return bpc from DSC DPCD color depth if DSC supported.
> So then in that function we dont need min(bpp, 3*bpc); right?
> 
> > bpp = clamp(bpp, bpc_min*3, bpc_max*3);
> 
> And then we clamp the pipe_bpp between min bpc and max bpc, right?
> 
> Manasi
> 
> 
> > 
> > Either that or we just return an error of the requested bpp is below the
> > dsc minimum.
> > 
> > > 
> > > Manasi
> > > 
> > > > +	}
> > > > +
> > > >  	return bpp;
> > > >  }
> > > >  
> > > > @@ -1974,6 +2008,80 @@ intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
> > > >  	return false;
> > > >  }
> > > >  
> > > > +static bool intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
> > > > +					struct intel_crtc_state *pipe_config,
> > > > +					const struct link_config_limits *limits)
> > > > +{
> > > > +	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
> > > > +	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
> > > > +	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
> > > > +	u16 dsc_max_output_bpp = 0;
> > > > +	u8 dsc_dp_slice_count = 0;
> > > > +
> > > > +	if (!intel_dp_supports_dsc(intel_dp, pipe_config))
> > > > +		return false;
> > > > +
> > > > +	/* DSC not supported for DSC sink BPC < 8 */
> > > > +	if (limits->max_bpp < 3 * DP_DSC_MIN_SUPPORTED_BPC) {
> > > > +		DRM_DEBUG_KMS("No DSC support for less than 8bpc\n");
> > > > +		return false;
> > > > +	}
> > > > +
> > > > +	/*
> > > > +	 * For now enable DSC for max bpp, max link rate, max lane count.
> > > > +	 * Optimize this later for the minimum possible link rate/lane count
> > > > +	 * with DSC enabled for the requested mode.
> > > > +	 */
> > > > +	pipe_config->pipe_bpp = limits->max_bpp;
> > > > +	pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
> > > > +	pipe_config->lane_count = limits->max_lane_count;
> > > > +
> > > > +	if (intel_dp_is_edp(intel_dp)) {
> > > > +		pipe_config->dsc_params.compressed_bpp =
> > > > +			drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
> > > > +		pipe_config->dsc_params.slice_count =
> > > > +			drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
> > > > +							true);
> > > > +	} else {
> > > > +		dsc_max_output_bpp =
> > > > +			intel_dp_dsc_get_output_bpp(pipe_config->port_clock,
> > > > +						    pipe_config->lane_count,
> > > > +						    adjusted_mode->crtc_clock,
> > > > +						    adjusted_mode->crtc_hdisplay);
> > > > +		dsc_dp_slice_count =
> > > > +			intel_dp_dsc_get_slice_count(intel_dp,
> > > > +						     adjusted_mode->crtc_clock,
> > > > +						     adjusted_mode->crtc_hdisplay);
> > > > +		if (!(dsc_max_output_bpp && dsc_dp_slice_count)) {
> > > > +			DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n");
> > > > +			return false;
> > > > +		}
> > > > +		pipe_config->dsc_params.compressed_bpp = dsc_max_output_bpp >> 4;
> > > > +		pipe_config->dsc_params.slice_count = dsc_dp_slice_count;
> > > > +	}
> > > > +	/*
> > > > +	 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
> > > > +	 * is greater than the maximum Cdclock and if slice count is even
> > > > +	 * then we need to use 2 VDSC instances.
> > > > +	 */
> > > > +	if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
> > > > +		if (pipe_config->dsc_params.slice_count > 1) {
> > > > +			pipe_config->dsc_params.dsc_split = true;
> > > > +		} else {
> > > > +			DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n");
> > > > +			return false;
> > > > +		}
> > > > +	}
> > > > +	pipe_config->dsc_params.compression_enable = true;
> > > > +	DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d "
> > > > +		      "Compressed Bpp = %d Slice Count = %d\n",
> > > > +		      pipe_config->pipe_bpp,
> > > > +		      pipe_config->dsc_params.compressed_bpp,
> > > > +		      pipe_config->dsc_params.slice_count);
> > > > +
> > > > +	return true;
> > > > +}
> > > > +
> > > >  static bool
> > > >  intel_dp_compute_link_config(struct intel_encoder *encoder,
> > > >  			     struct intel_crtc_state *pipe_config)
> > > > @@ -1982,6 +2090,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
> > > >  	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
> > > >  	struct link_config_limits limits;
> > > >  	int common_len;
> > > > +	bool ret = false;
> > > >  
> > > >  	common_len = intel_dp_common_len_rate_limit(intel_dp,
> > > >  						    intel_dp->max_link_rate);
> > > > @@ -1995,8 +2104,12 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
> > > >  	limits.min_lane_count = 1;
> > > >  	limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
> > > >  
> > > > -	limits.min_bpp = 6 * 3;
> > > >  	limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
> > > > +	if (intel_dp_supports_dsc(intel_dp, pipe_config) &&
> > > > +	    limits.max_bpp >= DP_DSC_MIN_SUPPORTED_BPC*3)
> > > > +		limits.min_bpp = DP_DSC_MIN_SUPPORTED_BPC * 3;
> > > > +	else
> > > > +		limits.min_bpp = 6 * 3;
> > > >  
> > > >  	if (intel_dp_is_edp(intel_dp) && intel_dp->edp_dpcd[0] < DP_EDP_14) {
> > > >  		/*
> > > > @@ -2020,7 +2133,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
> > > >  		      intel_dp->common_rates[limits.max_clock],
> > > >  		      limits.max_bpp, adjusted_mode->crtc_clock);
> > > >  
> > > > -	if (intel_dp_is_edp(intel_dp)) {
> > > > +	if (intel_dp_is_edp(intel_dp))
> > > >  		/*
> > > >  		 * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
> > > >  		 * section A.1: "It is recommended that the minimum number of
> > > > @@ -2030,26 +2143,42 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
> > > >  		 * Note that we use the max clock and lane count for eDP 1.3 and
> > > >  		 * earlier, and fast vs. wide is irrelevant.
> > > >  		 */
> > > > -		if (!intel_dp_compute_link_config_fast(intel_dp, pipe_config,
> > > > -						       &limits))
> > > > -			return false;
> > > > -	} else {
> > > > +		ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config,
> > > > +							&limits);
> > > > +	else
> > > >  		/* Optimize for slow and wide. */
> > > > -		if (!intel_dp_compute_link_config_wide(intel_dp, pipe_config,
> > > > -						       &limits))
> > > > +		ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config,
> > > > +							&limits);
> > > > +
> > > > +	/* enable compression if the mode doesn't fit available BW */
> > > > +	if (!ret) {
> > > > +		if (!intel_dp_dsc_compute_config(intel_dp, pipe_config,
> > > > +						 &limits))
> > > >  			return false;
> > > >  	}
> > > >  
> > > > -	DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
> > > > -		      pipe_config->lane_count, pipe_config->port_clock,
> > > > -		      pipe_config->pipe_bpp);
> > > > +	if (pipe_config->dsc_params.compression_enable) {
> > > > +		DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
> > > > +			      pipe_config->lane_count, pipe_config->port_clock,
> > > > +			      pipe_config->pipe_bpp,
> > > > +			      pipe_config->dsc_params.compressed_bpp);
> > > >  
> > > > -	DRM_DEBUG_KMS("DP link rate required %i available %i\n",
> > > > -		      intel_dp_link_required(adjusted_mode->crtc_clock,
> > > > -					     pipe_config->pipe_bpp),
> > > > -		      intel_dp_max_data_rate(pipe_config->port_clock,
> > > > -					     pipe_config->lane_count));
> > > > +		DRM_DEBUG_KMS("DP link rate required %i available %i\n",
> > > > +			      intel_dp_link_required(adjusted_mode->crtc_clock,
> > > > +						     pipe_config->dsc_params.compressed_bpp),
> > > > +			      intel_dp_max_data_rate(pipe_config->port_clock,
> > > > +						     pipe_config->lane_count));
> > > > +	} else {
> > > > +		DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
> > > > +			      pipe_config->lane_count, pipe_config->port_clock,
> > > > +			      pipe_config->pipe_bpp);
> > > >  
> > > > +		DRM_DEBUG_KMS("DP link rate required %i available %i\n",
> > > > +			      intel_dp_link_required(adjusted_mode->crtc_clock,
> > > > +						     pipe_config->pipe_bpp),
> > > > +			      intel_dp_max_data_rate(pipe_config->port_clock,
> > > > +						     pipe_config->lane_count));
> > > > +	}
> > > >  	return true;
> > > >  }
> > > >  
> > > > @@ -2133,7 +2262,9 @@ intel_dp_compute_config(struct intel_encoder *encoder,
> > > >  			intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
> > > >  	}
> > > >  
> > > > -	intel_link_compute_m_n(pipe_config->pipe_bpp, pipe_config->lane_count,
> > > > +	intel_link_compute_m_n(pipe_config->pipe_bpp,
> > > > +			       pipe_config->dsc_params.compressed_bpp,
> > > > +			       pipe_config->lane_count,
> > > >  			       adjusted_mode->crtc_clock,
> > > >  			       pipe_config->port_clock,
> > > >  			       &pipe_config->dp_m_n,
> > > > @@ -2142,7 +2273,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
> > > >  	if (intel_connector->panel.downclock_mode != NULL &&
> > > >  		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
> > > >  			pipe_config->has_drrs = true;
> > > > -			intel_link_compute_m_n(pipe_config->pipe_bpp,
> > > > +			intel_link_compute_m_n(pipe_config->pipe_bpp, 0,
> > > >  					       pipe_config->lane_count,
> > > >  					       intel_connector->panel.downclock_mode->clock,
> > > >  					       pipe_config->port_clock,
> > > > diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
> > > > index 8b71d64ebd9d..e02caedd443c 100644
> > > > --- a/drivers/gpu/drm/i915/intel_dp_mst.c
> > > > +++ b/drivers/gpu/drm/i915/intel_dp_mst.c
> > > > @@ -90,7 +90,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
> > > >  		}
> > > >  	}
> > > >  
> > > > -	intel_link_compute_m_n(bpp, lane_count,
> > > > +	intel_link_compute_m_n(bpp, 0, lane_count,
> > > >  			       adjusted_mode->crtc_clock,
> > > >  			       pipe_config->port_clock,
> > > >  			       &pipe_config->dp_m_n,
> > > > -- 
> > > > 2.18.0
> > > > 
> > 
> > -- 
> > Ville Syrjälä
> > Intel
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx at lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx


More information about the Intel-gfx mailing list