[Intel-xe] [PATCH v11 12/12] drm/xe: add lockdep annotation for xe_device_mem_access_get()

Thomas Hellström thomas.hellstrom at linux.intel.com
Tue Jun 13 09:42:48 UTC 2023


On Mon, 2023-06-12 at 18:12 +0100, Matthew Auld wrote:
> The atomics here might hide potential issues, so add a dummy lock with
> the idea that xe_pm_runtime_resume() is eventually going to be called
> when we are holding it. This only needs to happen once and then lockdep
> can validate all callers and their locks.

This looks pretty similar to the fs_reclaim_[acquire|release]
functionality.

Instead of using a per-device mutex, could you use a static struct
lockdep_map, similar to how __fs_reclaim_map is defined in
mm/page_alloc.c or dma_fence_lockdep_map?
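
Something along these lines, perhaps (untested sketch just to illustrate
the idea; the "xe_mem_access" map name is made up and the resume path is
elided since it would stay as in your patch):

  static struct lockdep_map xe_mem_access_lockdep_map =
          STATIC_LOCKDEP_MAP_INIT("xe_mem_access",
                                  &xe_mem_access_lockdep_map);

  void xe_device_mem_access_get(struct xe_device *xe)
  {
          if (xe_pm_read_callback_task(xe) == current)
                  return;

          /* Same annotation as in the patch, but on the static map. */
          lock_map_acquire(&xe_mem_access_lockdep_map);

          /* ... 0 -> 1 transition and runtime resume as in the patch ... */

          lock_map_release(&xe_mem_access_lockdep_map);
  }

That would also let you drop the drmm_mutex_init() call and the extra
struct member in xe_device_types.h.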

Otherwise LGTM.

/Thomas


> 
> Signed-off-by: Matthew Auld <matthew.auld at intel.com>
> Cc: Rodrigo Vivi <rodrigo.vivi at intel.com>
> Cc: Thomas Hellström <thomas.hellstrom at linux.intel.com>
> ---
>  drivers/gpu/drm/xe/xe_device.c       | 20 ++++++++++++++++++++
>  drivers/gpu/drm/xe/xe_device_types.h |  8 ++++++++
>  2 files changed, 28 insertions(+)
> 
> diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
> index 1dc552da434f..3011a72da42f 100644
> --- a/drivers/gpu/drm/xe/xe_device.c
> +++ b/drivers/gpu/drm/xe/xe_device.c
> @@ -225,6 +225,8 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
>         if (WARN_ON(err))
>                 goto err_put;
>  
> +       drmm_mutex_init(&xe->drm, &xe->mem_access.lock);
> +
>         return xe;
>  
>  err_put:
> @@ -443,6 +445,22 @@ void xe_device_mem_access_get(struct xe_device *xe)
>         if (xe_pm_read_callback_task(xe) == current)
>                 return;
>  
> +       /*
> +        * Since the resume here is synchronous it can be quite easy to deadlock
> +        * if we are not careful. Also in practice it might be quite timing
> +        * sensitive to ever see the 0 -> 1 transition with the callers locks
> +        * held, so deadlocks might exist but are hard for lockdep to ever see.
> +        * With this in mind, help lockdep learn about the potentially scary
> +        * stuff that can happen inside the runtime_resume callback by acquiring
> +        * a dummy lock (it doesn't protect anything and gets compiled out on
> +        * non-debug builds).  Lockdep then only needs to see the
> +        * mem_access.lock -> runtime_resume callback once, and then can
> +        * hopefully validate all the (callers_locks) -> mem_access.lock. For
> +        * example if the (callers_locks) are ever grabbed in the runtime_resume
> +        * callback, lockdep should give us a nice splat.
> +        */
> +       lock_map_acquire(&xe->mem_access.lock.dep_map);
> +
>         if (!atomic_inc_not_zero(&xe->mem_access.ref)) {
>                 bool hold_rpm = xe_pm_runtime_resume_and_get(xe);
>                 int ref;
> @@ -455,6 +473,8 @@ void xe_device_mem_access_get(struct xe_device *xe)
>         } else {
>                 XE_WARN_ON(atomic_read(&xe->mem_access.ref) == S32_MAX);
>         }
> +
> +       lock_map_release(&xe->mem_access.lock.dep_map);
>  }
>  
>  void xe_device_mem_access_put(struct xe_device *xe)
> diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
> index a39f3d3f3bc7..82b80e3f2d44 100644
> --- a/drivers/gpu/drm/xe/xe_device_types.h
> +++ b/drivers/gpu/drm/xe/xe_device_types.h
> @@ -323,6 +323,14 @@ struct xe_device {
>          * triggering additional actions when they occur.
>          */
>         struct {
> +               /**
> +                * @lock: Dummy lock used as lockdep aid to hopefully ensure
> +                * that lockdep can more easily see any potential deadlocks when
> +                * calling xe_device_mem_access_get().
> +                *
> +                * Doesn't protect anything.
> +                */
> +               struct mutex lock;
>                 /** @ref: ref count of memory accesses */
>                 atomic_t ref;
>                 /** @hold_rpm: need to put rpm ref back at the end */


