[Intel-xe] [PATCH 2/2] drm/i915: handle uncore spinlock when not available

Rodrigo Vivi rodrigo.vivi at intel.com
Thu Oct 12 19:05:59 UTC 2023


On Tue, Sep 19, 2023 at 12:47:37PM +0300, Luca Coelho wrote:
> The uncore code may not always be available (e.g. when we build the
> display code with Xe), so we can't always rely on having the uncore's
> spinlock.
> 
> To handle this, split the spin_lock/unlock_irqsave/restore() into
> spin_lock/unlock() followed by a call to local_irq_save/restore() and
> create wrapper functions for locking and unlocking the uncore's
> spinlock.  In these functions, we have a condition check and only
> actually try to lock/unlock the spinlock when I915 is defined, and
> thus uncore is available.
> 
> This keeps the ifdefs contained in these new functions and all such
> logic inside the display code.
> 
> Signed-off-by: Luca Coelho <luciano.coelho at intel.com>
> ---
>  drivers/gpu/drm/i915/display/intel_display.h | 20 +++++++++++++
>  drivers/gpu/drm/i915/display/intel_vblank.c  | 31 +++++++-------------
>  2 files changed, 30 insertions(+), 21 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
> index 51eb65109d39..fdf9ff68a8a6 100644
> --- a/drivers/gpu/drm/i915/display/intel_display.h
> +++ b/drivers/gpu/drm/i915/display/intel_display.h
> @@ -565,4 +565,24 @@ bool assert_port_valid(struct drm_i915_private *i915, enum port port);
>  
>  bool intel_scanout_needs_vtd_wa(struct drm_i915_private *i915);
>  
> +/*
> + * The uncore version of the spin lock functions is used to decide
> + * whether we need to lock the uncore lock or not.  This is only
> + * needed in i915, not in Xe.  Keep the decision-making centralized
> + * here.
> + */
> +static inline void uncore_spin_lock(spinlock_t *lock)
> +{
> +#ifdef I915
> +	spin_lock(lock);
> +#endif
> +}
> +
> +static inline void uncore_spin_unlock(spinlock_t *lock)
> +{
> +#ifdef I915
> +	spin_unlock(lock);
> +#endif
> +}
> +
>  #endif
> diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c
> index 416aa6749eaf..97841fcf5b0f 100644
> --- a/drivers/gpu/drm/i915/display/intel_vblank.c
> +++ b/drivers/gpu/drm/i915/display/intel_vblank.c
> @@ -292,11 +292,9 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
>  	 * register reads, potentially with preemption disabled, so the
>  	 * following code must not block on uncore.lock.
>  	 */
> -#ifdef I915
> -	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
> -#else

A patch like this should be done first purely in i915, then ported to drm-xe-next
with the proper clean-up.

But at this point, I'm asking myself why the display code needs
to take the uncore lock to start with. What kind of races are we trying
to prevent, or what serialization are we trying to accomplish with this?

Trying to understand the big picture here so we can try to find the right
solution. I mean, maybe the right solution is even to add the spin locks to
xe_mmio and make both xe_mmio and intel_uncore export helpers for the
external locking mechanism?

Although to me it sounds like the pitfall of using locks to protect code
and not data: https://blog.ffwll.ch/2022/07/locking-engineering.html

> +	uncore_spin_lock(&dev_priv->uncore.lock);
>  	local_irq_save(irqflags);
> -#endif
> +
>  	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
>  
>  	/* Get optional system timestamp before query. */
> @@ -363,11 +361,9 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
>  
>  	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
>  
> -#ifdef I915
> -	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
> -#else
> +	uncore_spin_unlock(&dev_priv->uncore.lock);
>  	local_irq_restore(irqflags);
> -#endif
> +
>  	/*
>  	 * While in vblank, position will be negative
>  	 * counting up towards 0 at vbl_end. And outside
> @@ -402,17 +398,14 @@ int intel_get_crtc_scanline(struct intel_crtc *crtc)
>  {
>  	unsigned long irqflags;
>  	int position;
> -#ifdef I915
> -	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
>  
> -	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
> -	position = __intel_get_crtc_scanline(crtc);
> -	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
> -#else
> +	uncore_spin_lock(&to_i915(crtc->base.dev)->uncore.lock);
>  	local_irq_save(irqflags);
>  	position = __intel_get_crtc_scanline(crtc);
>  	local_irq_restore(irqflags);
> -#endif
> +
> +	uncore_spin_unlock(&to_i915(crtc->base.dev)->uncore.lock);
> +
>  	return position;
>  }
>  
> @@ -534,9 +527,7 @@ void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state,
>  	 * Need to audit everything to make sure it's safe.
>  	 */
>  	spin_lock_irqsave(&i915->drm.vblank_time_lock, irqflags);
> -#ifdef I915
> -	spin_lock(&i915->uncore.lock);
> -#endif
> +	uncore_spin_lock(&i915->uncore.lock);
>  
>  	drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);
>  
> @@ -545,8 +536,6 @@ void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state,
>  	crtc->mode_flags = mode_flags;
>  
>  	crtc->scanline_offset = intel_crtc_scanline_offset(crtc_state);
> -#ifdef I915
> -	spin_unlock(&i915->uncore.lock);
> -#endif
> +	uncore_spin_unlock(&i915->uncore.lock);
>  	spin_unlock_irqrestore(&i915->drm.vblank_time_lock, irqflags);
>  }
> -- 
> 2.39.2
> 


More information about the Intel-xe mailing list