[PATCH v8 06/11] drm/xe/exec_queue: Prepare last fence for hw engine group resume context

Matthew Brost matthew.brost at intel.com
Fri Aug 9 03:37:57 UTC 2024


On Thu, Aug 08, 2024 at 08:40:24PM +0200, Francois Dugast wrote:
> Ensure we can safely take a reference to the exec queue's last fence in the
> context of resuming jobs of the hw engine group. The locking requirements
> differ from the general case, hence the introduction of this new function.
> 
> v2: Add kernel doc, rework the code to prevent code duplication
> 
> v3: Fix kernel doc, remove now unnecessary lockdep variants (Matt Brost)
> 
> v4: Remove new put function (Matt Brost)
> 
> Signed-off-by: Francois Dugast <francois.dugast at intel.com>

Reviewed-by: Matthew Brost <matthew.brost at intel.com>
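
As a side note for readers of the series: the expected usage is that the hw
engine group resume path already holds the group's mode_sem for write before
taking the last fence reference. A minimal caller sketch along those lines is
below; the iteration and field names (exec_queue_list, hw_engine_group_link,
q->vm) are assumptions for illustration only, not part of this patch:

	/* Sketch of a resume-path caller, not part of this patch */
	static void resume_sketch_wait_last_fences(struct xe_hw_engine_group *group)
	{
		struct xe_exec_queue *q;

		/* Caller context: group->mode_sem is held for write by the resume path */
		lockdep_assert_held_write(&group->mode_sem);

		/* Assumed list/link names for illustration */
		list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
			/* Takes a ref; returns the stub fence if already signaled */
			struct dma_fence *fence =
				xe_exec_queue_last_fence_get_for_resume(q, q->vm);

			dma_fence_wait(fence, false);
			dma_fence_put(fence);
		}
	}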

> ---
>  drivers/gpu/drm/xe/xe_exec_queue.c | 33 ++++++++++++++++++++++++++++--
>  drivers/gpu/drm/xe/xe_exec_queue.h |  2 ++
>  2 files changed, 33 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
> index 588cf5836798..abb2dfe7b1fd 100644
> --- a/drivers/gpu/drm/xe/xe_exec_queue.c
> +++ b/drivers/gpu/drm/xe/xe_exec_queue.c
> @@ -819,10 +819,12 @@ int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
>  static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q,
>  						    struct xe_vm *vm)
>  {
> -	if (q->flags & EXEC_QUEUE_FLAG_VM)
> +	if (q->flags & EXEC_QUEUE_FLAG_VM) {
>  		lockdep_assert_held(&vm->lock);
> -	else
> +	} else {
>  		xe_vm_assert_held(vm);
> +		lockdep_assert_held(&q->hwe->hw_engine_group->mode_sem);
> +	}
>  }
>  
>  /**
> @@ -876,6 +878,33 @@ struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
>  	return fence;
>  }
>  
> +/**
> + * xe_exec_queue_last_fence_get_for_resume() - Get last fence
> + * @q: The exec queue
> + * @vm: The VM the engine does a bind or exec for
> + *
> + * Get last fence, takes a ref. Only safe to be called in the context of
> + * resuming the hw engine group's long-running exec queue, when the group
> + * semaphore is held.
> + *
> + * Returns: last fence if not signaled, dma fence stub if signaled
> + */
> +struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *q,
> +							  struct xe_vm *vm)
> +{
> +	struct dma_fence *fence;
> +
> +	lockdep_assert_held_write(&q->hwe->hw_engine_group->mode_sem);
> +
> +	if (q->last_fence &&
> +	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
> +		xe_exec_queue_last_fence_put_unlocked(q);
> +
> +	fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
> +	dma_fence_get(fence);
> +	return fence;
> +}
> +
>  /**
>   * xe_exec_queue_last_fence_set() - Set last fence
>   * @q: The exec queue
> diff --git a/drivers/gpu/drm/xe/xe_exec_queue.h b/drivers/gpu/drm/xe/xe_exec_queue.h
> index ded77b0f3b90..5dcc1a65e5cf 100644
> --- a/drivers/gpu/drm/xe/xe_exec_queue.h
> +++ b/drivers/gpu/drm/xe/xe_exec_queue.h
> @@ -73,6 +73,8 @@ void xe_exec_queue_last_fence_put(struct xe_exec_queue *e, struct xe_vm *vm);
>  void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *e);
>  struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *e,
>  					       struct xe_vm *vm);
> +struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *e,
> +							  struct xe_vm *vm);
>  void xe_exec_queue_last_fence_set(struct xe_exec_queue *e, struct xe_vm *vm,
>  				  struct dma_fence *fence);
>  int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q,
> -- 
> 2.43.0
> 
