[Intel-xe] [PATCH] drm/xe: Reinstate pipelined fence enable_signaling

Matthew Brost matthew.brost at intel.com
Thu Sep 14 15:35:26 UTC 2023


On Thu, Sep 14, 2023 at 09:53:24AM +0200, Thomas Hellström wrote:
> With the GPUVA conversion, the xe_bo::vmas member was replaced by
> drm_gem_object::gpuva.list; however, a couple of usage instances were
> left referencing the old member, most notably the pipelined fence
> enable_signaling.
> 
> Remove the xe_bo::vmas member completely, fix the remaining usage
> instances, and also enable this pipelined fence enable_signaling even
> for faulting VMs, since we actually wait for bind fences to complete.
> 
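For reference, the pipelined enable_signaling being reinstated is the
standard dma_resv iterator walk shown in the first hunk below. A minimal
sketch of the pattern (the trailing dma_resv_iter_end() is assumed from
the usual API pairing, since the quoted hunk is truncated before it):

	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	/* Walk every fence on the BO's reservation object, including
	 * BOOKKEEP-usage (i.e. bind) fences.
	 */
	dma_resv_iter_begin(&cursor, bo->ttm.base.resv,
			    DMA_RESV_USAGE_BOOKKEEP);
	dma_resv_for_each_fence_unlocked(&cursor, fence)
		/* Ask each fence to signal asynchronously rather than
		 * only when explicitly waited upon.
		 */
		dma_fence_enable_sw_signaling(fence);
	dma_resv_iter_end(&cursor);
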
> Cc: Matthew Brost <matthew.brost at intel.com>

Reviewed-by: Matthew Brost <matthew.brost at intel.com>

> Signed-off-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
> ---
>  drivers/gpu/drm/xe/xe_bo.c       | 5 ++---
>  drivers/gpu/drm/xe/xe_bo_types.h | 2 --
>  drivers/gpu/drm/xe/xe_pt.c       | 2 +-
>  3 files changed, 3 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
> index 25fdc04627ca..3fdd0bab2fd5 100644
> --- a/drivers/gpu/drm/xe/xe_bo.c
> +++ b/drivers/gpu/drm/xe/xe_bo.c
> @@ -455,7 +455,7 @@ static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo,
>  
>  	dma_resv_assert_held(bo->ttm.base.resv);
>  
> -	if (!xe_device_in_fault_mode(xe) && !list_empty(&bo->vmas)) {
> +	if (!list_empty(&bo->ttm.base.gpuva.list)) {
>  		dma_resv_iter_begin(&cursor, bo->ttm.base.resv,
>  				    DMA_RESV_USAGE_BOOKKEEP);
>  		dma_resv_for_each_fence_unlocked(&cursor, fence)
> @@ -1043,7 +1043,7 @@ static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
>  		drm_prime_gem_destroy(&bo->ttm.base, NULL);
>  	drm_gem_object_release(&bo->ttm.base);
>  
> -	WARN_ON(!list_empty(&bo->vmas));
> +	WARN_ON(!list_empty(&ttm_bo->base.gpuva.list));
>  
>  	if (bo->ggtt_node.size)
>  		xe_ggtt_remove_bo(bo->tile->mem.ggtt, bo);
> @@ -1226,7 +1226,6 @@ struct xe_bo *__xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
>  	bo->props.preferred_gt = XE_BO_PROPS_INVALID;
>  	bo->props.preferred_mem_type = XE_BO_PROPS_INVALID;
>  	bo->ttm.priority = DRM_XE_VMA_PRIORITY_NORMAL;
> -	INIT_LIST_HEAD(&bo->vmas);
>  	INIT_LIST_HEAD(&bo->pinned_link);
>  
>  	drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size);
> diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
> index f6ee920303af..27fe72129ee6 100644
> --- a/drivers/gpu/drm/xe/xe_bo_types.h
> +++ b/drivers/gpu/drm/xe/xe_bo_types.h
> @@ -31,8 +31,6 @@ struct xe_bo {
>  	struct xe_vm *vm;
>  	/** @tile: Tile this BO is attached to (kernel BO only) */
>  	struct xe_tile *tile;
> -	/** @vmas: List of VMAs for this BO */
> -	struct list_head vmas;
>  	/** @placements: valid placements for this BO */
>  	struct ttm_place placements[XE_BO_MAX_PLACEMENTS];
>  	/** @placement: current placement for this BO */
> diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
> index 5709518e314b..5e4d8f89d8e1 100644
> --- a/drivers/gpu/drm/xe/xe_pt.c
> +++ b/drivers/gpu/drm/xe/xe_pt.c
> @@ -265,7 +265,7 @@ void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred)
>  	if (!pt)
>  		return;
>  
> -	XE_WARN_ON(!list_empty(&pt->bo->vmas));
> +	XE_WARN_ON(!list_empty(&pt->bo->ttm.base.gpuva.list));
>  	xe_bo_unpin(pt->bo);
>  	xe_bo_put_deferred(pt->bo, deferred);
>  
> -- 
> 2.41.0
> 
