[Intel-xe] [PATCH v5 3/7] drm/xe: don't allocate under ct->lock

Rodrigo Vivi rodrigo.vivi at kernel.org
Wed May 17 15:50:08 UTC 2023


On Wed, May 17, 2023 at 04:22:40PM +0100, Matthew Auld wrote:
> ct->lock seems to be a sensitive lock: it looks to be primed with
> fs_reclaim, so holding it and then allocating memory will cause
> lockdep to complain. We need to change the ordering wrt grabbing the
> ct->lock and potentially grabbing the runtime_pm, since some of the
> runtime_pm routines can allocate memory (or at least that's what lockdep
> seems to suggest).
> 
> Signed-off-by: Matthew Auld <matthew.auld at intel.com>
> Cc: Thomas Hellström <thomas.hellstrom at linux.intel.com>
> Cc: Rodrigo Vivi <rodrigo.vivi at intel.com>

It is a good idea anyway to grab the mem_access as early as possible,
unlike locks, where we want to keep the critical section as short as
possible.
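
For anyone wondering what "primed with fs_reclaim" means above: at CT
init time the driver teaches lockdep that ct->lock may be taken inside
the reclaim path. A sketch of that annotation pattern (the exact helper
in xe may look slightly different):

	static void primelockdep(struct xe_guc_ct *ct)
	{
		if (!IS_ENABLED(CONFIG_LOCKDEP))
			return;

		/* Record the ordering fs_reclaim -> ct->lock with lockdep. */
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&ct->lock);
		fs_reclaim_release(GFP_KERNEL);
	}

Once that ordering is on record, any GFP_KERNEL allocation made while
holding ct->lock (including one buried inside a runtime_pm resume path)
records the inverse ordering, and lockdep reports it as a potential
deadlock. That is the splat this patch avoids.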

Reviewed-by: Rodrigo Vivi <rodrigo.vivi at intel.com>
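
For completeness, the resulting ordering, mirroring the reworked
guc_ct_send() in the diff below: the mem_access (runtime_pm) reference,
which may allocate, is taken strictly outside the lock:

	xe_device_mem_access_get(ct_to_xe(ct));	/* may resume; can allocate */
	mutex_lock(&ct->lock);			/* no allocations past here */
	ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, g2h_fence);
	mutex_unlock(&ct->lock);
	xe_device_mem_access_put(ct_to_xe(ct));

The lockdep_assert_held() + xe_device_assert_mem_access() pair added to
guc_ct_send_locked() then enforces that contract for every caller.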

> ---
>  drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c |  4 ++++
>  drivers/gpu/drm/xe/xe_guc_ct.c              | 13 +++++++------
>  2 files changed, 11 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
> index c815a42e2cdb..20f8f0aae6b4 100644
> --- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
> +++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
> @@ -5,6 +5,7 @@
>  
>  #include "xe_gt_tlb_invalidation.h"
>  
> +#include "xe_device.h"
>  #include "xe_gt.h"
>  #include "xe_guc.h"
>  #include "xe_guc_ct.h"
> @@ -112,6 +113,8 @@ static int send_tlb_invalidation(struct xe_guc *guc,
>  	 * in order which they currently are, if that changes the algorithm will
>  	 * need to be updated.
>  	 */
> +
> +	xe_device_mem_access_get(gt->xe);
>  	mutex_lock(&guc->ct.lock);
>  	seqno = gt->tlb_invalidation.seqno;
>  	if (fence) {
> @@ -140,6 +143,7 @@ static int send_tlb_invalidation(struct xe_guc *guc,
>  	if (ret < 0 && fence)
>  		invalidation_fence_signal(fence);
>  	mutex_unlock(&guc->ct.lock);
> +	xe_device_mem_access_put(gt->xe);
>  
>  	return ret;
>  }
> diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
> index 9055ff133a7c..579d7f341f13 100644
> --- a/drivers/gpu/drm/xe/xe_guc_ct.c
> +++ b/drivers/gpu/drm/xe/xe_guc_ct.c
> @@ -494,26 +494,22 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
>  		}
>  	}
>  
> -	xe_device_mem_access_get(ct_to_xe(ct));
>  retry:
>  	ret = has_room(ct, len + GUC_CTB_HDR_LEN, g2h_len);
>  	if (unlikely(ret))
> -		goto put_wa;
> +		goto out;
>  
>  	ret = h2g_write(ct, action, len, g2h_fence ? g2h_fence->seqno : 0,
>  			!!g2h_fence);
>  	if (unlikely(ret)) {
>  		if (ret == -EAGAIN)
>  			goto retry;
> -		goto put_wa;
> +		goto out;
>  	}
>  
>  	g2h_reserve_space(ct, g2h_len, num_g2h);
>  	xe_guc_notify(ct_to_guc(ct));
> -put_wa:
> -	xe_device_mem_access_put(ct_to_xe(ct));
>  out:
> -
>  	return ret;
>  }
>  
> @@ -535,6 +531,7 @@ static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
>  
>  	XE_BUG_ON(g2h_len && g2h_fence);
>  	lockdep_assert_held(&ct->lock);
> +	xe_device_assert_mem_access(ct_to_xe(ct));
>  
>  try_again:
>  	ret = __guc_ct_send_locked(ct, action, len, g2h_len, num_g2h,
> @@ -602,10 +599,14 @@ static int guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
>  
>  	XE_BUG_ON(g2h_len && g2h_fence);
>  
> +	xe_device_mem_access_get(ct_to_xe(ct));
> +
>  	mutex_lock(&ct->lock);
>  	ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, g2h_fence);
>  	mutex_unlock(&ct->lock);
>  
> +	xe_device_mem_access_put(ct_to_xe(ct));
> +
>  	return ret;
>  }
>  
> -- 
> 2.40.1
> 

