[PATCH 08/12] drm/i915/display: Add guardband check for feature latencies

Jani Nikula <jani.nikula at linux.intel.com>
Mon Aug 11 15:14:54 UTC 2025


On Thu, 07 Aug 2025, Ankit Nautiyal <ankit.k.nautiyal at intel.com> wrote:
> Add a check during atomic crtc check phase to ensure the programmed VRR
> guardband is sufficient to cover latencies introduced by enabled features
> such as DSC, PSR/PR, scalers, and DP SDPs.
>
> Currently, the guardband is programmed to match the vblank length, so
> existing checks in skl_is_vblank_too_short() are valid. However, upcoming
> changes will optimize the guardband independently of vblank, making those
> checks incorrect.
>
> Introduce an explicit guardband check to prepare for future updates
> that will remove checking against the vblank length and later program an
> optimized guardband.
>
> v2: Use new helper for PSR2/Panel Replay latency.
>
> Signed-off-by: Ankit Nautiyal <ankit.k.nautiyal at intel.com>
> ---
>  drivers/gpu/drm/i915/display/intel_display.c | 138 +++++++++++++++++++
>  drivers/gpu/drm/i915/display/skl_watermark.c |   2 +-
>  drivers/gpu/drm/i915/display/skl_watermark.h |   1 +
>  3 files changed, 140 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
> index af4d54672d0d..c542a3110051 100644
> --- a/drivers/gpu/drm/i915/display/intel_display.c
> +++ b/drivers/gpu/drm/i915/display/intel_display.c
> @@ -4227,6 +4227,138 @@ static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
>  	return 0;
>  }
>  
> +static int
> +cdclk_prefill_adjustment(const struct intel_crtc_state *crtc_state)
> +{
> +	struct intel_display *display = to_intel_display(crtc_state);
> +	struct intel_atomic_state *state =
> +		to_intel_atomic_state(crtc_state->uapi.state);
> +	const struct intel_cdclk_state *cdclk_state;
> +
> +	cdclk_state = intel_atomic_get_cdclk_state(state);
> +	if (IS_ERR(cdclk_state)) {
> +		drm_WARN_ON(display->drm, PTR_ERR(cdclk_state));
> +		return 1;
> +	}
> +
> +	return min(1, DIV_ROUND_UP(crtc_state->pixel_rate,
> +				   2 * intel_cdclk_logical(cdclk_state)));
> +}
> +
> +static int
> +dsc_prefill_latency(const struct intel_crtc_state *crtc_state, int linetime)
> +{
> +	const struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
> +	int chroma_downscaling_factor = skl_scaler_chroma_downscale_factor(crtc_state);
> +	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
> +	int num_scaler_users = hweight32(scaler_state->scaler_users);
> +	u64 hscale_k[ARRAY_SIZE(scaler_state->scalers)];
> +	u64 vscale_k[ARRAY_SIZE(scaler_state->scalers)];
> +	u32 dsc_prefill_latency = 0;
> +
> +	if (!crtc_state->dsc.compression_enable ||
> +	    !num_scaler_users ||
> +	    num_scaler_users > crtc->num_scalers ||
> +	    num_scaler_users > ARRAY_SIZE(scaler_state->scalers))
> +		return dsc_prefill_latency;
> +
> +	for (int i = 0; i < num_scaler_users; i++) {
> +		hscale_k[i] = max(1000, mul_u32_u32(scaler_state->scalers[i].hscale, 1000) >> 16);
> +		vscale_k[i] = max(1000, mul_u32_u32(scaler_state->scalers[i].vscale, 1000) >> 16);
> +	}
> +
> +	dsc_prefill_latency =
> +		intel_display_dsc_prefill_latency(num_scaler_users, hscale_k, vscale_k,
> +						  chroma_downscaling_factor,
> +						  cdclk_prefill_adjustment(crtc_state),
> +						  linetime);
> +
> +	return dsc_prefill_latency;
> +}
> +
> +static int
> +scaler_prefill_latency(const struct intel_crtc_state *crtc_state, int linetime)
> +{
> +	const struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
> +	int chroma_downscaling_factor = skl_scaler_chroma_downscale_factor(crtc_state);
> +	int num_scaler_users = hweight32(scaler_state->scaler_users);
> +	u64 hscale_k = 1000, vscale_k = 1000;
> +	int scaler_prefill_latency = 0;
> +
> +	if (!num_scaler_users)
> +		return scaler_prefill_latency;
> +
> +	if (num_scaler_users > 1) {
> +		hscale_k = max(1000, mul_u32_u32(scaler_state->scalers[0].hscale, 1000) >> 16);
> +		vscale_k = max(1000, mul_u32_u32(scaler_state->scalers[0].vscale, 1000) >> 16);
> +	}
> +
> +	scaler_prefill_latency =
> +		intel_display_scaler_prefill_latency(num_scaler_users, hscale_k, vscale_k,
> +						     chroma_downscaling_factor,
> +						     cdclk_prefill_adjustment(crtc_state),
> +						     linetime);
> +
> +	return scaler_prefill_latency;
> +}
> +
> +static int intel_crtc_check_guardband(struct intel_crtc_state *crtc_state)

Please avoid "check" naming like this. In general, it's a poor verb to
use, because what it does is ambiguous from the name. Is it an
assertion, does it return a value, what does it do?

However, as a special case, if a function gets called from the atomic
check path (which I also think is ill-named, but what can you do), with
the same parameters and conventions, then name it accordingly.

Thus intel_crtc_guardband_atomic_check().
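
For illustration (a sketch only; the body and the parameter stay exactly
as in the patch, only the name changes):

	static int
	intel_crtc_guardband_atomic_check(struct intel_crtc_state *crtc_state)
	{
		/* same body as intel_crtc_check_guardband() above */
	}

and at the call site in intel_crtc_atomic_check():

	if (HAS_VRR(display) && intel_vrr_possible(crtc_state)) {
		ret = intel_crtc_guardband_atomic_check(crtc_state);
		if (ret)
			return ret;
	}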

BR,
Jani.




> +{
> +	struct intel_display *display = to_intel_display(crtc_state);
> +	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
> +	int dsc_prefill_time = 0;
> +	int scaler_prefill_time;
> +	int wm0_prefill_time;
> +	int pkgc_max_latency;
> +	int psr2_pr_latency;
> +	int min_guardband;
> +	int guardband_us;
> +	int sagv_latency;
> +	int linetime_us;
> +	int sdp_latency;
> +	int pm_delay;
> +
> +	if (!crtc_state->vrr.enable && !intel_vrr_always_use_vrr_tg(display))
> +		return 0;
> +
> +	if (!adjusted_mode->crtc_clock)
> +		return 0;
> +
> +	linetime_us = DIV_ROUND_UP(adjusted_mode->crtc_htotal * 1000,
> +				   adjusted_mode->crtc_clock);
> +
> +	pkgc_max_latency = skl_watermark_max_latency(display, 1);
> +	sagv_latency = display->sagv.block_time_us;
> +
> +	wm0_prefill_time = skl_max_wm0_lines(crtc_state) * linetime_us + 20;
> +
> +	scaler_prefill_time = scaler_prefill_latency(crtc_state, linetime_us);
> +
> +	if (crtc_state->dsc.compression_enable)
> +		dsc_prefill_time = dsc_prefill_latency(crtc_state, linetime_us);
> +
> +	pm_delay = crtc_state->framestart_delay +
> +		   max(sagv_latency, pkgc_max_latency) +
> +		   wm0_prefill_time +
> +		   scaler_prefill_time +
> +		   dsc_prefill_time;
> +
> +	psr2_pr_latency = intel_alpm_compute_max_link_wake_latency(crtc_state, false);
> +	sdp_latency = intel_dp_compute_sdp_latency(crtc_state, false);
> +
> +	guardband_us = max(sdp_latency, psr2_pr_latency);
> +	guardband_us = max(guardband_us, pm_delay);
> +	min_guardband = DIV_ROUND_UP(guardband_us, linetime_us);
> +
> +	if (crtc_state->vrr.guardband < min_guardband) {
> +		drm_dbg_kms(display->drm, "vrr.guardband %d < min guardband %d\n",
> +			    crtc_state->vrr.guardband, min_guardband);
> +		return -EINVAL;
> +	}
> +
> +	return 0;
> +}
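
As a sanity check of the unit conversion here (numbers are purely
illustrative, not taken from the patch): with crtc_htotal = 4400 and
crtc_clock = 594000 kHz,

	linetime_us   = DIV_ROUND_UP(4400 * 1000, 594000) = 8
	guardband_us  = max(sdp_latency, psr2_pr_latency, pm_delay)
	              = max(30, 45, 100) = 100
	min_guardband = DIV_ROUND_UP(100, 8) = 13 lines

i.e. a programmed vrr.guardband below 13 lines would fail with -EINVAL.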
> +
>  static int intel_crtc_atomic_check(struct intel_atomic_state *state,
>  				   struct intel_crtc *crtc)
>  {
> @@ -4289,6 +4421,12 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
>  	if (ret)
>  		return ret;
>  
> +	if (HAS_VRR(display) && intel_vrr_possible(crtc_state)) {
> +		ret = intel_crtc_check_guardband(crtc_state);
> +		if (ret)
> +			return ret;
> +	}
> +
>  	return 0;
>  }
>  
> diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
> index 4474f987de06..5ffa76cb1633 100644
> --- a/drivers/gpu/drm/i915/display/skl_watermark.c
> +++ b/drivers/gpu/drm/i915/display/skl_watermark.c
> @@ -2249,7 +2249,7 @@ skl_is_vblank_too_short(const struct intel_crtc_state *crtc_state,
>  		adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vblank_start;
>  }
>  
> -static int skl_max_wm0_lines(const struct intel_crtc_state *crtc_state)
> +int skl_max_wm0_lines(const struct intel_crtc_state *crtc_state)
>  {
>  	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
>  	enum plane_id plane_id;
> diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h
> index 62790816f030..8706c2010ebe 100644
> --- a/drivers/gpu/drm/i915/display/skl_watermark.h
> +++ b/drivers/gpu/drm/i915/display/skl_watermark.h
> @@ -78,6 +78,7 @@ void intel_dbuf_mbus_post_ddb_update(struct intel_atomic_state *state);
>  void intel_program_dpkgc_latency(struct intel_atomic_state *state);
>  
>  bool intel_dbuf_pmdemand_needs_update(struct intel_atomic_state *state);
> +int skl_max_wm0_lines(const struct intel_crtc_state *crtc_state);
>  
>  #endif /* __SKL_WATERMARK_H__ */

-- 
Jani Nikula, Intel

