[PATCH v3] drm/xe/guc: Configure TLB timeout based on CT buffer size

Matthew Brost matthew.brost at intel.com
Wed Jun 26 21:55:38 UTC 2024


On Wed, Jun 26, 2024 at 05:01:09PM +0200, Nirmoy Das wrote:
> GuC TLB invalidation depends on the GuC processing the request from the CT
> queue and then on the actual time needed to invalidate the TLB. Add a
> function that returns an overestimate of the time a TLB invalidation H2G
> might take, which can be used as the timeout value when waiting for a TLB
> invalidation to complete.
> 
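
A quick sanity check of what this yields, assuming CTB_H2G_BUFFER_SIZE stays
at SZ_4K (so a full H2G queue takes roughly one second to drain; both numbers
here are assumptions for illustration):

	/* illustrative: assume HZ = 250 and CTB_H2G_BUFFER_SIZE = SZ_4K */
	delay   = (SZ_4K * HZ) / SZ_4K;   /* = HZ, ~1s for a full queue    */
	timeout = HZ / 4 + 2 * delay;     /* = HZ / 4 + 2 * HZ, ~2.25s     */

So the worst case grows from the old fixed 250ms to roughly 2.25s.
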
> v3: Pass CT to xe_guc_ct_queue_proc_time_jiffies() (Michal)
>     Add tlb_timeout_jiffies() that replaces TLB_TIMEOUT (Michal)
> v2: Address reviews from Michal.
> 
> Closes: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/1622
> Cc: Matthew Brost <matthew.brost at intel.com>

Since this is under review by others, I'll leave the RB to them:
Acked-by: Matthew Brost <matthew.brost at intel.com>

> Cc: Michal Wajdeczko <michal.wajdeczko at intel.com>
> Suggested-by: Daniele Ceraolo Spurio <daniele.ceraolospurio at intel.com>
> Signed-off-by: Nirmoy Das <nirmoy.das at intel.com>
> ---
>  drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c | 30 +++++++++++++++------
>  drivers/gpu/drm/xe/xe_guc_ct.c              | 16 +++++++++++
>  drivers/gpu/drm/xe/xe_guc_ct.h              |  2 ++
>  3 files changed, 40 insertions(+), 8 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
> index e1f1ccb01143..d509b72a6d89 100644
> --- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
> +++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
> @@ -17,7 +17,22 @@
>  #include "xe_trace.h"
>  #include "regs/xe_guc_regs.h"
>  
> -#define TLB_TIMEOUT	(HZ / 4)
> +/*
> + * TLB invalidation depends on pending commands in the CT queue and then the
> + * actual invalidation time. Double the time to process a full CT queue
> + * just to be on the safe side.
> + */
> +static long tlb_timeout_jiffies(struct xe_gt *gt)
> +{
> +	/* this reflects what HW/GuC needs to process a TLB inv request */
> +	const long hw_tlb_timeout = HZ / 4;
> +
> +	/* this estimates actual delay caused by the CTB transport */
> +	long delay = xe_guc_ct_queue_proc_time_jiffies(&gt->uc.guc.ct);
> +
> +	return hw_tlb_timeout + 2 * delay;
> +}
> +
>  
>  static void xe_gt_tlb_fence_timeout(struct work_struct *work)
>  {
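
One drive-by readability thought, take it or leave it: the magic HZ / 4 could
be spelled via msecs_to_jiffies() so the intended wall-clock value is explicit.
A sketch, equivalent to the current constant up to jiffy rounding:

	/* 250ms matches the previous TLB_TIMEOUT of HZ / 4 */
	const long hw_tlb_timeout = msecs_to_jiffies(250);
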
> @@ -32,7 +47,7 @@ static void xe_gt_tlb_fence_timeout(struct work_struct *work)
>  		s64 since_inval_ms = ktime_ms_delta(ktime_get(),
>  						    fence->invalidation_time);
>  
> -		if (msecs_to_jiffies(since_inval_ms) < TLB_TIMEOUT)
> +		if (msecs_to_jiffies(since_inval_ms) < tlb_timeout_jiffies(gt))
>  			break;
>  
>  		trace_xe_gt_tlb_invalidation_fence_timeout(xe, fence);
> @@ -47,7 +62,7 @@ static void xe_gt_tlb_fence_timeout(struct work_struct *work)
>  	if (!list_empty(&gt->tlb_invalidation.pending_fences))
>  		queue_delayed_work(system_wq,
>  				   &gt->tlb_invalidation.fence_tdr,
> -				   TLB_TIMEOUT);
> +				   tlb_timeout_jiffies(gt));
>  	spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
>  }
>  
> @@ -183,7 +198,7 @@ static int send_tlb_invalidation(struct xe_guc *guc,
>  			if (list_is_singular(&gt->tlb_invalidation.pending_fences))
>  				queue_delayed_work(system_wq,
>  						   &gt->tlb_invalidation.fence_tdr,
> -						   TLB_TIMEOUT);
> +						   tlb_timeout_jiffies(gt));
>  		}
>  		spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
>  	} else if (ret < 0 && fence) {
> @@ -390,8 +405,7 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
>   * @gt: graphics tile
>   * @seqno: seqno to wait which was returned from xe_gt_tlb_invalidation
>   *
> - * Wait for 200ms for a TLB invalidation to complete, in practice we always
> - * should receive the TLB invalidation within 200ms.
> + * Wait for tlb_timeout_jiffies() for a TLB invalidation to complete.
>   *
>   * Return: 0 on success, -ETIME on TLB invalidation timeout
>   */
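
For context, the typical pairing of issue-then-wait looks roughly like the
below. This is a sketch only; the xe_gt_tlb_invalidation_vma() argument list
is abbreviated in the hunk above, so the NULL fence here is an assumption:

	int seqno, ret;

	seqno = xe_gt_tlb_invalidation_vma(gt, NULL, vma);
	if (seqno < 0)
		return seqno;

	/* now bounded by tlb_timeout_jiffies() instead of a fixed TLB_TIMEOUT */
	ret = xe_gt_tlb_invalidation_wait(gt, seqno);	/* -ETIME on timeout */
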
> @@ -410,7 +424,7 @@ int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno)
>  	 */
>  	ret = wait_event_timeout(guc->ct.wq,
>  				 tlb_invalidation_seqno_past(gt, seqno),
> -				 TLB_TIMEOUT);
> +				 tlb_timeout_jiffies(gt));
>  	if (!ret) {
>  		struct drm_printer p = xe_gt_err_printer(gt);
>  
> @@ -486,7 +500,7 @@ int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
>  	if (!list_empty(&gt->tlb_invalidation.pending_fences))
>  		mod_delayed_work(system_wq,
>  				 &gt->tlb_invalidation.fence_tdr,
> -				 TLB_TIMEOUT);
> +				 tlb_timeout_jiffies(gt));
>  	else
>  		cancel_delayed_work(&gt->tlb_invalidation.fence_tdr);
>  
> diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
> index 873d1bcbedd7..df95b0e878ad 100644
> --- a/drivers/gpu/drm/xe/xe_guc_ct.c
> +++ b/drivers/gpu/drm/xe/xe_guc_ct.c
> @@ -112,6 +112,22 @@ ct_to_xe(struct xe_guc_ct *ct)
>  #define CTB_G2H_BUFFER_SIZE	(4 * CTB_H2G_BUFFER_SIZE)
>  #define G2H_ROOM_BUFFER_SIZE	(CTB_G2H_BUFFER_SIZE / 4)
>  
> +/**
> + * xe_guc_ct_queue_proc_time_jiffies - Return maximum time to process a full
> + * CT command queue
> + * @ct: the &xe_guc_ct. Unused at the moment but will be used in the future.
> + *
> + * Observation is that a 4KiB buffer full of commands takes a little over a
> + * second to process. Use that to calculate the maximum time to process a full
> + * CT command queue.
> + *
> + * Return: Maximum time to process a full CT queue in jiffies.
> + */
> +long xe_guc_ct_queue_proc_time_jiffies(struct xe_guc_ct *ct)
> +{
> +	return (CTB_H2G_BUFFER_SIZE * HZ) / SZ_4K;
> +}
> +
>  static size_t guc_ct_size(void)
>  {
>  	return 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE +
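
Worth noting the helper scales linearly with the H2G buffer size, so the TLB
timeout self-adjusts if the CTB is ever enlarged. For a hypothetical 16KiB
H2G buffer (not what the driver uses today):

	/* hypothetical: if CTB_H2G_BUFFER_SIZE were grown to SZ_16K */
	(SZ_16K * HZ) / SZ_4K == 4 * HZ;	/* ~4s to drain a full queue */

which would push tlb_timeout_jiffies() to HZ / 4 + 8 * HZ, i.e. ~8.25s.
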
> diff --git a/drivers/gpu/drm/xe/xe_guc_ct.h b/drivers/gpu/drm/xe/xe_guc_ct.h
> index 105bb8e99a8d..190202fce2d0 100644
> --- a/drivers/gpu/drm/xe/xe_guc_ct.h
> +++ b/drivers/gpu/drm/xe/xe_guc_ct.h
> @@ -64,4 +64,6 @@ xe_guc_ct_send_block_no_fail(struct xe_guc_ct *ct, const u32 *action, u32 len)
>  	return xe_guc_ct_send_recv_no_fail(ct, action, len, NULL);
>  }
>  
> +long xe_guc_ct_queue_proc_time_jiffies(struct xe_guc_ct *ct);
> +
>  #endif
> -- 
> 2.42.0
> 

