[PATCH 5/5] Revert "drm/xe: Do not access xe file when updating exec queue run_ticks"

Matthew Brost matthew.brost at intel.com
Mon Jul 8 21:33:40 UTC 2024


On Mon, Jul 08, 2024 at 01:21:03PM -0700, Umesh Nerlige Ramappa wrote:
> This reverts commit ce62827bc294ba5f8b3909bfa5d7dbf9de8aab6b.
> 
> Closes: https://gitlab.freedesktop.org/drm/xe/kernel/issues/1908
> Signed-off-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa at intel.com>

This looks right once the rest of my feedback on the series is addressed.

With that:
Reviewed-by: Matthew Brost <matthew.brost at intel.com>

> ---
>  drivers/gpu/drm/xe/xe_drm_client.c       | 5 +----
>  drivers/gpu/drm/xe/xe_exec_queue.c       | 5 ++++-
>  drivers/gpu/drm/xe/xe_exec_queue_types.h | 4 ----
>  3 files changed, 5 insertions(+), 9 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
> index 6a26923fa10e..7ddd59908334 100644
> --- a/drivers/gpu/drm/xe/xe_drm_client.c
> +++ b/drivers/gpu/drm/xe/xe_drm_client.c
> @@ -251,11 +251,8 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file)
>  
>  	/* Accumulate all the exec queues from this client */
>  	mutex_lock(&xef->exec_queue.lock);
> -	xa_for_each(&xef->exec_queue.xa, i, q) {
> +	xa_for_each(&xef->exec_queue.xa, i, q)
>  		xe_exec_queue_update_run_ticks(q);
> -		xef->run_ticks[q->class] += q->run_ticks - q->old_run_ticks;
> -		q->old_run_ticks = q->run_ticks;
> -	}
>  	mutex_unlock(&xef->exec_queue.lock);
>  
>  	/* Get the total GPU cycles */
> diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
> index b01eccdd75ba..719a70878fe2 100644
> --- a/drivers/gpu/drm/xe/xe_exec_queue.c
> +++ b/drivers/gpu/drm/xe/xe_exec_queue.c
> @@ -768,6 +768,7 @@ bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
>   */
>  void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
>  {
> +	struct xe_file *xef;
>  	struct xe_lrc *lrc;
>  	u32 old_ts, new_ts;
>  
> @@ -779,6 +780,8 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
>  	if (!q->vm || !q->vm->xef)
>  		return;
>  
> +	xef = q->vm->xef;
> +
>  	/*
>  	 * Only sample the first LRC. For parallel submission, all of them are
>  	 * scheduled together and we compensate that below by multiplying by
> @@ -789,7 +792,7 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
>  	 */
>  	lrc = q->lrc[0];
>  	new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
> -	q->run_ticks += (new_ts - old_ts) * q->width;
> +	xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;
>  }
>  
>  void xe_exec_queue_kill(struct xe_exec_queue *q)
> diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
> index f768e48f31e4..b5343cdd0632 100644
> --- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
> +++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
> @@ -142,10 +142,6 @@ struct xe_exec_queue {
>  	 * Protected by @vm's resv. Unused if @vm == NULL.
>  	 */
>  	u64 tlb_flush_seqno;
> -	/** @old_run_ticks: prior hw engine class run time in ticks for this exec queue */
> -	u64 old_run_ticks;
> -	/** @run_ticks: hw engine class run time in ticks for this exec queue */
> -	u64 run_ticks;
>  	/** @lrc: logical ring context for this exec queue */
>  	struct xe_lrc *lrc[];
>  };
> -- 
> 2.38.1
> 
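FWIW, assembling the hunks above, xe_exec_queue_update_run_ticks() with this
revert applied reads roughly as below. Context lines are approximated and the
intervening comment is trimmed, so treat it as a sketch rather than the exact
tree:

void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
{
	struct xe_file *xef;
	struct xe_lrc *lrc;
	u32 old_ts, new_ts;

	/* Skip queues that aren't tied to a user xe_file */
	if (!q->vm || !q->vm->xef)
		return;

	xef = q->vm->xef;

	/*
	 * Only sample the first LRC; parallel submission is compensated
	 * for by multiplying by q->width below.
	 */
	lrc = q->lrc[0];
	new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
	xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;
}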

