[PATCH 15/15] drm/xe: Convert pinned suspend eviction for exhaustive eviction

Thomas Hellström thomas.hellstrom at linux.intel.com
Fri Aug 15 15:29:36 UTC 2025


On Thu, 2025-08-14 at 13:30 -0700, Matthew Brost wrote:
> On Wed, Aug 13, 2025 at 12:51:21PM +0200, Thomas Hellström wrote:
> > Pinned suspend eviction and preparation for eviction validates
> > system memory for eviction buffers. Do that under a
> > validation exclusive lock to avoid interfering with other
> > processes validating system graphics memory.
> > 
> > Signed-off-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
> > ---
> >  drivers/gpu/drm/xe/xe_bo.c | 205 +++++++++++++++++++------------------
> >  1 file changed, 108 insertions(+), 97 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
> > index 82bf158426ad..efb9c88b6aa7 100644
> > --- a/drivers/gpu/drm/xe/xe_bo.c
> > +++ b/drivers/gpu/drm/xe/xe_bo.c
> > @@ -1139,43 +1139,47 @@ long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
> >  int xe_bo_notifier_prepare_pinned(struct xe_bo *bo)
> >  {
> >  	struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
> > -	struct drm_exec *exec = XE_VALIDATION_UNIMPLEMENTED;
> > +	struct xe_validation_ctx ctx;
> > +	struct drm_exec exec;
> >  	struct xe_bo *backup;
> >  	int ret = 0;
> >  
> > -	xe_bo_lock(bo, false);
> > +	xe_validation_guard(&ctx, &xe->val, &exec, 0, ret, true) {
> > +		ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
> > +		drm_exec_retry_on_contention(&exec);
> > +		xe_assert(xe, !ret);
> > +		xe_assert(xe, !bo->backup_obj);
> >  
> > -	xe_assert(xe, !bo->backup_obj);
> > +		/*
> > +		 * Since this is called from the PM notifier we might have raced with
> > +		 * someone unpinning this after we dropped the pinned list lock and
> > +		 * grabbing the above bo lock.
> > +		 */
> > +		if (!xe_bo_is_pinned(bo))
> > +			break;
> >  
> > -	/*
> > -	 * Since this is called from the PM notifier we might have raced with
> > -	 * someone unpinning this after we dropped the pinned list lock and
> > -	 * grabbing the above bo lock.
> > -	 */
> > -	if (!xe_bo_is_pinned(bo))
> > -		goto out_unlock_bo;
> > +		if (!xe_bo_is_vram(bo))
> > +			break;
> >  
> > -	if (!xe_bo_is_vram(bo))
> > -		goto out_unlock_bo;
> > +		if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
> > +			break;
> >  
> > -	if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
> > -		goto out_unlock_bo;
> > +		backup = xe_bo_init_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, xe_bo_size(bo),
> > +					   DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
> > +					   XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
> > +					   XE_BO_FLAG_PINNED, &exec);
> > +		if (IS_ERR(backup)) {
> > +			drm_exec_retry_on_contention(&exec);
> > +			ret = PTR_ERR(backup);
> > +			xe_validation_retry_on_oom(&ctx, &ret);
> > +			break;
> > +		}
> >  
> > -	backup = xe_bo_init_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, xe_bo_size(bo),
> > -				   DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
> > -				   XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
> > -				   XE_BO_FLAG_PINNED, exec);
> > -	if (IS_ERR(backup)) {
> > -		ret = PTR_ERR(backup);
> > -		goto out_unlock_bo;
> > +		backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
> > +		ttm_bo_pin(&backup->ttm);
> > +		bo->backup_obj = backup;
> >  	}
> >  
> > -	backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
> > -	ttm_bo_pin(&backup->ttm);
> > -	bo->backup_obj = backup;
> > -
> > -out_unlock_bo:
> > -	xe_bo_unlock(bo);
> >  	return ret;
> >  }
> >  
> > @@ -1215,99 +1219,106 @@ int xe_bo_notifier_unprepare_pinned(struct xe_bo *bo)
> >  int xe_bo_evict_pinned(struct xe_bo *bo)
> >  {
> >  	struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
> > -	struct drm_exec *exec = XE_VALIDATION_UNIMPLEMENTED;
> > +	struct xe_validation_ctx ctx;
> > +	struct drm_exec exec;
> >  	struct xe_bo *backup = bo->backup_obj;
> >  	bool backup_created = false;
> >  	bool unmap = false;
> >  	int ret = 0;
> >  
> > -	xe_bo_lock(bo, false);
> > +	xe_validation_guard(&ctx, &xe->val, &exec, 0, ret, true) {
> > +		ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
> > +		drm_exec_retry_on_contention(&exec);
> > +		xe_assert(xe, !ret);
> >  
> > -	if (WARN_ON(!bo->ttm.resource)) {
> > -		ret = -EINVAL;
> > -		goto out_unlock_bo;
> > -	}
> > +		if (WARN_ON(!bo->ttm.resource)) {
> > +			ret = -EINVAL;
> > +			break;
> > +		}
> >  
> > -	if (WARN_ON(!xe_bo_is_pinned(bo))) {
> > -		ret = -EINVAL;
> > -		goto out_unlock_bo;
> > -	}
> > +		if (WARN_ON(!xe_bo_is_pinned(bo))) {
> > +			ret = -EINVAL;
> > +			break;
> > +		}
> >  
> > -	if (!xe_bo_is_vram(bo))
> > -		goto out_unlock_bo;
> > +		if (!xe_bo_is_vram(bo))
> > +			break;
> >  
> > -	if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
> > -		goto out_unlock_bo;
> > +		if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
> > +			break;
> >  
> > -	if (!backup) {
> > -		backup = xe_bo_init_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, xe_bo_size(bo),
> > -					   DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
> > -					   XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
> > -					   XE_BO_FLAG_PINNED, exec);
> > -		if (IS_ERR(backup)) {
> > -			ret = PTR_ERR(backup);
> > -			goto out_unlock_bo;
> > +		if (!backup) {
> > +			backup = xe_bo_init_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL,
> > +						   xe_bo_size(bo),
> > +						   DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
> > +						   XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
> > +						   XE_BO_FLAG_PINNED, &exec);
> > +			if (IS_ERR(backup)) {
> > +				drm_exec_retry_on_contention(&exec);
> > +				ret = PTR_ERR(backup);
> > +				xe_validation_retry_on_oom(&ctx, &ret);
> > +				break;
> > +			}
> > +			backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
> > +			backup_created = true;
> >  		}
> > -		backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
> > -		backup_created = true;
> > -	}
> >  
> > -	if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) {
> > -		struct xe_migrate *migrate;
> > -		struct dma_fence *fence;
> > -
> > -		if (bo->tile)
> > -			migrate = bo->tile->migrate;
> > -		else
> > -			migrate = mem_type_to_migrate(xe, bo->ttm.resource->mem_type);
> > +		if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) {
> > +			struct xe_migrate *migrate;
> > +			struct dma_fence *fence;
> >  
> > -		ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
> > -		if (ret)
> > -			goto out_backup;
> > +			if (bo->tile)
> > +				migrate = bo->tile->migrate;
> > +			else
> > +				migrate = mem_type_to_migrate(xe, bo->ttm.resource->mem_type);
> >  
> > -		ret = dma_resv_reserve_fences(backup->ttm.base.resv, 1);
> > -		if (ret)
> > -			goto out_backup;
> > +			ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
> > +			if (ret)
> > +				goto out_backup;
> >  
> > -		fence = xe_migrate_copy(migrate, bo, backup, bo->ttm.resource,
> > -					backup->ttm.resource, false);
> > -		if (IS_ERR(fence)) {
> > -			ret = PTR_ERR(fence);
> > -			goto out_backup;
> > -		}
> > +			ret = dma_resv_reserve_fences(backup->ttm.base.resv, 1);
> > +			if (ret)
> > +				goto out_backup;
> >  
> > -		dma_resv_add_fence(bo->ttm.base.resv, fence,
> > -				   DMA_RESV_USAGE_KERNEL);
> > -		dma_resv_add_fence(backup->ttm.base.resv, fence,
> > -				   DMA_RESV_USAGE_KERNEL);
> > -		dma_fence_put(fence);
> > -	} else {
> > -		ret = xe_bo_vmap(backup);
> > -		if (ret)
> > -			goto out_backup;
> > +			fence = xe_migrate_copy(migrate, bo, backup, bo->ttm.resource,
> > +						backup->ttm.resource, false);
> > +			if (IS_ERR(fence)) {
> > +				ret = PTR_ERR(fence);
> > +				goto out_backup;
> > +			}
> >  
> > -		if (iosys_map_is_null(&bo->vmap)) {
> > -			ret = xe_bo_vmap(bo);
> > +			dma_resv_add_fence(bo->ttm.base.resv, fence,
> > +					   DMA_RESV_USAGE_KERNEL);
> > +			dma_resv_add_fence(backup->ttm.base.resv, fence,
> > +					   DMA_RESV_USAGE_KERNEL);
> > +			dma_fence_put(fence);
> > +		} else {
> > +			ret = xe_bo_vmap(backup);
> >  			if (ret)
> >  				goto out_backup;
> > -			unmap = true;
> > -		}
> >  
> > -		xe_map_memcpy_from(xe, backup->vmap.vaddr, &bo->vmap, 0,
> > -				   xe_bo_size(bo));
> > -	}
> > +			if (iosys_map_is_null(&bo->vmap)) {
> > +				ret = xe_bo_vmap(bo);
> > +				if (ret)
> > +					goto out_vunmap;
> > +				unmap = true;
> > +			}
> >  
> > -	if (!bo->backup_obj)
> > -		bo->backup_obj = backup;
> > +			xe_map_memcpy_from(xe, backup->vmap.vaddr, &bo->vmap, 0,
> > +					   xe_bo_size(bo));
> > +		}
> >  
> > +		if (!bo->backup_obj)
> > +			bo->backup_obj = backup;
> > +out_vunmap:
> 
> I just want to confirm that this is safe. The cleanup.h documentation
> discourages the use of goto because of scoping issues. I assume that
> since this label is within the scope of the guard, it is fine.
> 
> It might be worth adding a quick note in the validation guard’s
> kernel-doc mentioning that goto can be dangerous, explaining what is
> allowed, and perhaps referencing the cleanup.h documentation. I could
> see this being something developers might trip over.
> 

Yes, you are correct. I'll avoid the gotos in v2.
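
For the CPU-copy branch, I'm thinking of something roughly along these
lines (untested sketch, just to illustrate keeping the cleanup local to
the guard scope instead of jumping to labels; the final v2 may differ):

	} else {
		ret = xe_bo_vmap(backup);
		if (!ret) {
			if (iosys_map_is_null(&bo->vmap)) {
				ret = xe_bo_vmap(bo);
				if (!ret)
					unmap = true;
			}
			/* Only copy once both mappings succeeded. */
			if (!ret)
				xe_map_memcpy_from(xe, backup->vmap.vaddr, &bo->vmap, 0,
						   xe_bo_size(bo));
			xe_bo_vunmap(backup);
		}
	}

	if (!ret && !bo->backup_obj)
		bo->backup_obj = backup;
	/* Common cleanup, still inside the validation guard. */
	if (ret && backup_created)
		xe_bo_put(backup);
	if (unmap)
		xe_bo_vunmap(bo);

That way nothing jumps over the scoped cleanup variables the guard sets
up, which is the hazard the cleanup.h documentation warns about.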

/Thomas



> Patch LGTM, though.
> 
> Matt
> 
> > +		xe_bo_vunmap(backup);
> >  out_backup:
> > -	xe_bo_vunmap(backup);
> > -	if (ret && backup_created)
> > -		xe_bo_put(backup);
> > -out_unlock_bo:
> > -	if (unmap)
> > -		xe_bo_vunmap(bo);
> > -	xe_bo_unlock(bo);
> > +		if (ret && backup_created)
> > +			xe_bo_put(backup);
> > +		if (unmap)
> > +			xe_bo_vunmap(bo);
> > +	}
> > +
> >  	return ret;
> >  }
> >  
> > -- 
> > 2.50.1
> > 


