[PATCH 2/2] drm/xe: Do not use CT lock in TLB code

Summers, Stuart stuart.summers at intel.com
Wed Apr 16 19:24:02 UTC 2025


On Wed, 2025-04-16 at 18:30 +0000, Stuart Summers wrote:
> From: Nirmoy Das <nirmoy.das at intel.com>
> 
> Stop abusing the CT lock to protect the GT TLB invalidation fence seqno;
> add a dedicated lock for it instead.
> 
> Signed-off-by: Nirmoy Das <nirmoy.das at intel.com>

Also agree here:
Reviewed-by: Stuart Summers <stuart.summers at intel.com>
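
Since the locking change itself is small, here is a minimal userspace sketch of
the pattern the patch moves to, i.e. seqno allocation guarded by its own mutex
instead of piggybacking on guc->ct.lock. This is not the driver code: struct
tlb_inval, alloc_seqno() and main() are made-up names, and the wrap arithmetic
is only my reading of the context around the hunks (the quoted patch itself only
shows the wrap-to-1 handling and TLB_INVALIDATION_SEQNO_MAX):

#include <pthread.h>
#include <stdio.h>

#define TLB_INVALIDATION_SEQNO_MAX 0x100000

struct tlb_inval {
	pthread_mutex_t seqno_lock;	/* stands in for tlb_invalidation.seqno_lock */
	int seqno;			/* wraps below SEQNO_MAX and skips 0 */
};

/* Hand out the next seqno under the dedicated lock; in the real driver the
 * send to the GuC happens inside this critical section as well. */
static int alloc_seqno(struct tlb_inval *ti)
{
	int seqno;

	pthread_mutex_lock(&ti->seqno_lock);
	seqno = ti->seqno;
	/* ... build and send the invalidation carrying this seqno ... */
	ti->seqno = (ti->seqno + 1) % TLB_INVALIDATION_SEQNO_MAX;
	if (!ti->seqno)
		ti->seqno = 1;	/* seqno 0 stays reserved, as in the patch context */
	pthread_mutex_unlock(&ti->seqno_lock);

	return seqno;
}

int main(void)
{
	struct tlb_inval ti = { .seqno = 1 };

	pthread_mutex_init(&ti.seqno_lock, NULL);
	printf("allocated seqno %d\n", alloc_seqno(&ti));
	pthread_mutex_destroy(&ti.seqno_lock);
	return 0;
}

The nice side effect is that the seqno/fence bookkeeping no longer depends on
the CT lock, so the plain xe_guc_ct_send() path can be used and the
drmm-managed seqno_lock documents exactly what it protects.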

> ---
>  drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c | 17 ++++++++++-------
>  drivers/gpu/drm/xe/xe_gt_types.h            | 13 ++++++++++---
>  2 files changed, 20 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
> index 031c4d43f36b..c2bb53362d74 100644
> --- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
> +++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
> @@ -3,6 +3,8 @@
>   * Copyright © 2023 Intel Corporation
>   */
>  
> +#include <drm/drm_managed.h>
> +
>  #include "xe_gt_tlb_invalidation.h"
>  
>  #include "abi/guc_actions_abi.h"
> @@ -123,7 +125,8 @@ int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt)
>         INIT_DELAYED_WORK(&gt->tlb_invalidation.fence_tdr,
>                           xe_gt_tlb_fence_timeout);
>  
> -       return 0;
> +       return drmm_mutex_init(&gt_to_xe(gt)->drm,
> +                              &gt->tlb_invalidation.seqno_lock);
>  }
>  
>  /**
> @@ -142,7 +145,7 @@ void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
>          * appear.
>          */
>  
> -       mutex_lock(&gt->uc.guc.ct.lock);
> +       mutex_lock(&gt->tlb_invalidation.seqno_lock);
>         spin_lock_irq(&gt->tlb_invalidation.pending_lock);
>         cancel_delayed_work(&gt->tlb_invalidation.fence_tdr);
>         /*
> @@ -162,7 +165,7 @@ void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
>                                  &gt->tlb_invalidation.pending_fences, link)
>                 invalidation_fence_signal(gt_to_xe(gt), fence);
>         spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
> -       mutex_unlock(&gt->uc.guc.ct.lock);
> +       mutex_unlock(&gt->tlb_invalidation.seqno_lock);
>  }
>  
>  static bool tlb_invalidation_seqno_past(struct xe_gt *gt, int seqno)
> @@ -195,13 +198,13 @@ static int send_tlb_invalidation(struct xe_guc *guc,
>          * need to be updated.
>          */
>  
> -       mutex_lock(&guc->ct.lock);
> +       mutex_lock(&gt->tlb_invalidation.seqno_lock);
>         seqno = gt->tlb_invalidation.seqno;
>         fence->seqno = seqno;
>         trace_xe_gt_tlb_invalidation_fence_send(xe, fence);
>         action[1] = seqno;
> -       ret = xe_guc_ct_send_locked(&guc->ct, action, len,
> -                                   G2H_LEN_DW_TLB_INVALIDATE, 1);
> +       ret = xe_guc_ct_send(&guc->ct, action, len,
> +                            G2H_LEN_DW_TLB_INVALIDATE, 1);
>         if (!ret) {
>                 spin_lock_irq(&gt->tlb_invalidation.pending_lock);
>                 /*
> @@ -232,7 +235,7 @@ static int send_tlb_invalidation(struct xe_guc *guc,
>                 if (!gt->tlb_invalidation.seqno)
>                         gt->tlb_invalidation.seqno = 1;
>         }
> -       mutex_unlock(&guc->ct.lock);
> +       mutex_unlock(&gt->tlb_invalidation.seqno_lock);
>         xe_gt_stats_incr(gt, XE_GT_STATS_ID_TLB_INVAL, 1);
>  
>         return ret;
> diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
> index be81687cbe2b..4736a608a881 100644
> --- a/drivers/gpu/drm/xe/xe_gt_types.h
> +++ b/drivers/gpu/drm/xe/xe_gt_types.h
> @@ -187,17 +187,24 @@ struct xe_gt {
>  
>         /** @tlb_invalidation: TLB invalidation state */
>         struct {
> -               /** @tlb_invalidation.seqno: TLB invalidation seqno, protected by CT lock */
> +               /** @tlb_invalidation.seqno_lock: TLB invalidation seqno lock */
> +               struct mutex seqno_lock;
> +               /**
> +                * @tlb_invalidation.seqno: TLB invalidation seqno, protected
> +                * by @tlb_invalidation.seqno_lock
> +                */
>  #define TLB_INVALIDATION_SEQNO_MAX     0x100000
>                 int seqno;
>                 /**
>                  * @tlb_invalidation.seqno_recv: last received TLB invalidation seqno,
> -                * protected by CT lock
> +                * protected by @tlb_invalidation.seqno_lock (send) and
> +                * @tlb_invalidation.pending_lock (send, recv)
>                  */
>                 int seqno_recv;
>                 /**
>                  * @tlb_invalidation.pending_fences: list of pending fences waiting TLB
> -                * invaliations, protected by CT lock
> +                * invaliations, protected by @tlb_invalidation.seqno_lock
> +                * (send) and @tlb_invalidation.pending_lock (send, recv)
>                  */
>                 struct list_head pending_fences;
>                 /**


