[Intel-gfx] [PATCH 5/5] drm/i915: add support for 5/6 data buffer partitioning on Haswell

Ville Syrjälä ville.syrjala at linux.intel.com
Wed May 29 18:17:50 CEST 2013


On Fri, May 24, 2013 at 11:59:21AM -0300, Paulo Zanoni wrote:
> From: Paulo Zanoni <paulo.r.zanoni at intel.com>
> 
> Now we compute the results for both 1/2 and 5/6 partitioning and then
> use hsw_find_best_result to choose which one to use.
> 
> With this patch, Haswell watermark support should be in good shape.
> The only improvement we're still missing is handling the case where
> the primary plane is disabled: we always assume it is enabled, so it
> is always taken into account when calculating the watermarks.
> 
> Signed-off-by: Paulo Zanoni <paulo.r.zanoni at intel.com>
> ---
>  drivers/gpu/drm/i915/intel_pm.c | 64 ++++++++++++++++++++++++++++++++++-------
>  1 file changed, 53 insertions(+), 11 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
> index 9f9eb48..6fdfd1a 100644
> --- a/drivers/gpu/drm/i915/intel_pm.c
> +++ b/drivers/gpu/drm/i915/intel_pm.c
> @@ -2344,7 +2344,8 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
>  static void hsw_compute_wm_parameters(struct drm_device *dev,
>  				      struct hsw_pipe_wm_parameters *params,
>  				      uint32_t *wm,
> -				      struct hsw_wm_maximums *lp_max_1_2)
> +				      struct hsw_wm_maximums *lp_max_1_2,
> +				      struct hsw_wm_maximums *lp_max_5_6)
>  {
>  	struct drm_i915_private *dev_priv = dev->dev_private;
>  	struct drm_crtc *crtc;
> @@ -2399,15 +2400,17 @@ static void hsw_compute_wm_parameters(struct drm_device *dev,
>  	}
>  
>  	if (pipes_active > 1) {
> -		lp_max_1_2->pri = sprites_enabled ? 128 : 256;
> -		lp_max_1_2->spr = 128;
> -		lp_max_1_2->cur = 64;
> +		lp_max_1_2->pri = lp_max_5_6->pri = sprites_enabled ? 128 : 256;
> +		lp_max_1_2->spr = lp_max_5_6->spr = 128;
> +		lp_max_1_2->cur = lp_max_5_6->cur = 64;
>  	} else {
>  		lp_max_1_2->pri = sprites_enabled ? 384 : 768;
> +		lp_max_5_6->pri = sprites_enabled ? 128 : 768;
>  		lp_max_1_2->spr = 384;
> -		lp_max_1_2->cur = 255;
> +		lp_max_5_6->spr = 640;
> +		lp_max_1_2->cur = lp_max_5_6->cur = 255;
>  	}
> -	lp_max_1_2->fbc = 15;
> +	lp_max_1_2->fbc = lp_max_5_6->fbc = 15;
>  }
>  
>  static void hsw_compute_wm_results(struct drm_device *dev,
> @@ -2488,6 +2491,32 @@ static void hsw_compute_wm_results(struct drm_device *dev,
>  	}
>  }
>  
> +/* Find the result with the highest level enabled. Check for enable_fbc_wm in
> + * case both are at the same level. Prefer r1 in case they're the same. */
> +struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
> +					   struct hsw_wm_values *r2)
> +{
> +	int i, val_r1 = 0, val_r2 = 0;
> +
> +	for (i = 0; i < 3; i++) {
> +		if (r1->wm_lp[i] & WM3_LP_EN)
> +			val_r1 |= (1 << i);
> +		if (r2->wm_lp[i] & WM3_LP_EN)
> +			val_r2 |= (1 << i);

This could just be:
  if (r1->wm_lp[i] & WM3_LP_EN)
    val_r1 = i;
  if (r2->wm_lp[i] & WM3_LP_EN)
    val_r2 = i;

And maybe call them max_r1 and max_r2 or something...
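Just to illustrate, a rough sketch of how the whole helper could then look
(completely untested; I used max_r1/max_r2, added static since there's no
prototype, and used i + 1 so that "no LP levels enabled" stays distinct from
"only WM_LP1 enabled"):

  /* Find the result with the highest enabled LP level. If both reach the
   * same level, prefer the one that keeps FBC watermarks enabled, and r1
   * if they're still tied. */
  static struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
                                                    struct hsw_wm_values *r2)
  {
      int i, max_r1 = 0, max_r2 = 0;

      for (i = 0; i < 3; i++) {
          /* wm_lp[0..2] hold WM_LP1..WM_LP3; the LP levels are enabled
           * bottom-up, so the last enabled index gives the maximum. */
          if (r1->wm_lp[i] & WM3_LP_EN)
              max_r1 = i + 1;
          if (r2->wm_lp[i] & WM3_LP_EN)
              max_r2 = i + 1;
      }

      if (max_r1 == max_r2)
          return (r2->enable_fbc_wm && !r1->enable_fbc_wm) ? r2 : r1;

      return max_r1 > max_r2 ? r1 : r2;
  }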

> +	}
> +
> +	if (val_r1 == val_r2) {
> +		if (r2->enable_fbc_wm && !r1->enable_fbc_wm)
> +			return r2;
> +		else
> +			return r1;
> +	} else if (val_r1 > val_r2) {
> +		return r1;
> +	} else {
> +		return r2;
> +	}
> +}
> +
>  /*
>   * The spec says we shouldn't write when we don't need, because every write
>   * causes WMs to be re-evaluated, expending some power.
> @@ -2584,14 +2613,27 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
>  static void haswell_update_wm(struct drm_device *dev)
>  {
>  	struct drm_i915_private *dev_priv = dev->dev_private;
> -	struct hsw_wm_maximums lp_max_1_2;
> +	struct hsw_wm_maximums lp_max_1_2, lp_max_5_6;
>  	struct hsw_pipe_wm_parameters params[3];
> -	struct hsw_wm_values results;
> +	struct hsw_wm_values results_1_2, results_5_6, *best_results;
>  	uint32_t wm[5];
> +	enum hsw_data_buf_partitioning partitioning;
> +
> +	hsw_compute_wm_parameters(dev, params, wm, &lp_max_1_2, &lp_max_5_6);
> +
> +	hsw_compute_wm_results(dev, params, wm, &lp_max_1_2, &results_1_2);
> +	if (lp_max_1_2.pri != lp_max_5_6.pri) {
> +		hsw_compute_wm_results(dev, params, wm, &lp_max_5_6,
> +				       &results_5_6);
> +		best_results = hsw_find_best_result(&results_1_2, &results_5_6);
> +	} else {
> +		best_results = &results_1_2;
> +	}
> +
> +	partitioning = (best_results == &results_1_2) ?
> +		       HSW_DATA_BUF_PART_1_2 : HSW_DATA_BUF_PART_5_6;
>  
> -	hsw_compute_wm_parameters(dev, params, wm, &lp_max_1_2);
> -	hsw_compute_wm_results(dev, params, wm, &lp_max_1_2, &results);
> -	hsw_write_wm_values(dev_priv, &results, HSW_DATA_BUF_PART_1_2);
> +	hsw_write_wm_values(dev_priv, best_results, partitioning);
>  }
>  
>  static void haswell_update_sprite_wm(struct drm_device *dev, int pipe,
> -- 
> 1.8.1.2
> 

-- 
Ville Syrjälä
Intel OTC
