[PATCH v2 05/11] drm/xe: Add xe_gt_tlb_invalidation_done_handler

Summers, Stuart stuart.summers at intel.com
Wed Jul 23 17:22:12 UTC 2025


On Sun, 2024-07-07 at 21:03 -0700, Matthew Brost wrote:
> Decouple GT TLB seqno handling from G2H handler.
> 
> v2:
>  - Add kernel doc
> 
> Signed-off-by: Matthew Brost <matthew.brost at intel.com>

Looks good to me, thanks!

Reviewed-by: Stuart Summers <stuart.summers at intel.com>

> ---
>  drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c | 47 +++++++++++++--------
>  1 file changed, 30 insertions(+), 17 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
> index 9062d9f85ffb..2493ea91b637 100644
> --- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
> +++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
> @@ -412,27 +412,18 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
>  }
>  
>  /**
> - * xe_guc_tlb_invalidation_done_handler - TLB invalidation done handler
> - * @guc: guc
> - * @msg: message indicating TLB invalidation done
> - * @len: length of message
> + * xe_gt_tlb_invalidation_done_handler - GT TLB invalidation done handler
> + * @gt: gt
> + * @seqno: seqno of invalidation that is done
>   *
> - * Parse seqno of TLB invalidation, wake any waiters for seqno, and signal any
> - * invalidation fences for seqno. Algorithm for this depends on seqno being
> - * received in-order and asserts this assumption.
> - *
> - * Return: 0 on success, -EPROTO for malformed messages.
> + * Update recv seqno, signal any GT TLB invalidation fences, and restart TDR
>   */
> -int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
> +static void xe_gt_tlb_invalidation_done_handler(struct xe_gt *gt, int seqno)
>  {
> -       struct xe_gt *gt = guc_to_gt(guc);
>         struct xe_device *xe = gt_to_xe(gt);
>         struct xe_gt_tlb_invalidation_fence *fence, *next;
>         unsigned long flags;
>  
> -       if (unlikely(len != 1))
> -               return -EPROTO;
> -
>         /*
>          * This can also be run both directly from the IRQ handler and also in
>          * process_g2h_msg(). Only one may process any individual CT message,
> @@ -449,12 +440,12 @@ int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
>          * process_g2h_msg().
>          */
>         spin_lock_irqsave(&gt->tlb_invalidation.pending_lock, flags);
> -       if (tlb_invalidation_seqno_past(gt, msg[0])) {
> +       if (tlb_invalidation_seqno_past(gt, seqno)) {
>                 spin_unlock_irqrestore(&gt->tlb_invalidation.pending_lock, flags);
> -               return 0;
> +               return;
>         }
>  
> -       WRITE_ONCE(gt->tlb_invalidation.seqno_recv, msg[0]);
> +       WRITE_ONCE(gt->tlb_invalidation.seqno_recv, seqno);
>  
>         list_for_each_entry_safe(fence, next,
>                                  &gt->tlb_invalidation.pending_fences, link) {
> @@ -474,6 +465,28 @@ int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
>                 cancel_delayed_work(&gt->tlb_invalidation.fence_tdr);
>  
>         spin_unlock_irqrestore(&gt->tlb_invalidation.pending_lock, flags);
> +}
> +
> +/**
> + * xe_guc_tlb_invalidation_done_handler - TLB invalidation done handler
> + * @guc: guc
> + * @msg: message indicating TLB invalidation done
> + * @len: length of message
> + *
> + * Parse seqno of TLB invalidation, wake any waiters for seqno, and signal any
> + * invalidation fences for seqno. Algorithm for this depends on seqno being
> + * received in-order and asserts this assumption.
> + *
> + * Return: 0 on success, -EPROTO for malformed messages.
> + */
> +int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
> +{
> +       struct xe_gt *gt = guc_to_gt(guc);
> +
> +       if (unlikely(len != 1))
> +               return -EPROTO;
> +
> +       xe_gt_tlb_invalidation_done_handler(gt, msg[0]);
>  
>         return 0;
>  }
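
For readers following along outside the driver tree, here is a minimal,
self-contained sketch of the split this patch introduces: a transport-facing
handler that only validates the raw G2H message and a core handler that works
purely on the parsed seqno. All names below (toy_gt,
toy_guc_tlb_invalidation_done_handler, etc.) are simplified stand-ins, not the
real xe structures, and the locking, fence signaling, seqno-wrap handling and
TDR restart done by the actual driver are omitted.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct toy_gt {
	int seqno_recv;		/* last completed invalidation seqno */
};

/* Core handler: knows nothing about the G2H wire format, only seqnos. */
static void toy_gt_tlb_invalidation_done_handler(struct toy_gt *gt, int seqno)
{
	if (seqno <= gt->seqno_recv)	/* already past this seqno, nothing to do */
		return;

	gt->seqno_recv = seqno;
	/* the real driver also signals pending fences and restarts the TDR here */
}

/* Transport handler: validates the message, then hands off the parsed seqno. */
static int toy_guc_tlb_invalidation_done_handler(struct toy_gt *gt,
						 const uint32_t *msg, uint32_t len)
{
	if (len != 1)
		return -EPROTO;		/* malformed G2H message */

	toy_gt_tlb_invalidation_done_handler(gt, (int)msg[0]);
	return 0;
}

int main(void)
{
	struct toy_gt gt = { .seqno_recv = 0 };
	uint32_t msg[] = { 5 };

	toy_guc_tlb_invalidation_done_handler(&gt, msg, 1);
	printf("seqno_recv = %d\n", gt.seqno_recv);	/* prints 5 */
	return 0;
}

Keeping the -EPROTO length check in the GuC-facing wrapper means a caller that
already has a bare seqno can reuse the core handler without faking a G2H
message, which seems to be the point of the decoupling given the comment above
about running both directly from the IRQ handler and from process_g2h_msg().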


