[PATCH v4 1/2] drm/xe: Userptr invalidation race with binds fixes

Thomas Hellström thomas.hellstrom at linux.intel.com
Tue Feb 25 14:30:54 UTC 2025


Hi, Matt,

On Mon, 2025-02-24 at 09:01 -0800, Matthew Brost wrote:
> Always wait on dma-resv bookkeep slots if a userptr invalidation has
> raced with a bind, ensuring PTEs temporarily set up to point at
> invalidated pages are never accessed.
> 
> Fix up the initial-bind handling: always add VMAs to the invalidation
> list and wait on dma-resv bookkeep slots.
> 
> Always hold the notifier lock across the TLB invalidation in the
> notifier to prevent a UAF if an unbind races.
> 
> All of the above changes are folded into a single Fixes patch in the
> hope of an easier backport.
> 
> v2:
>  - Wait dma-resv bookkeep before issuing PTE zap (Thomas)
>  - Support scratch page on invalidation (Thomas)
> v3:
>  - Drop clear of PTEs (Thomas)

This was what I actually meant.

https://patchwork.freedesktop.org/patch/639489/?series=145409&rev=1

/Thomas

> v4:
>  - Remove double dma-resv wait
> 
> Cc: Thomas Hellström <thomas.hellstrom at linux.intel.com>
> Cc: <stable at vger.kernel.org>
> Fixes: e8babb280b5e ("drm/xe: Convert multiple bind ops into single job")
> Signed-off-by: Matthew Brost <matthew.brost at intel.com>
> ---
>  drivers/gpu/drm/xe/xe_pt.c | 21 ++++++++++++---------
>  drivers/gpu/drm/xe/xe_vm.c |  4 ++--
>  2 files changed, 14 insertions(+), 11 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
> index 1ddcc7e79a93..ffd23c3564c5 100644
> --- a/drivers/gpu/drm/xe/xe_pt.c
> +++ b/drivers/gpu/drm/xe/xe_pt.c
> @@ -1215,9 +1215,6 @@ static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma,
>  	uvma = to_userptr_vma(vma);
>  	notifier_seq = uvma->userptr.notifier_seq;
>  
> -	if (uvma->userptr.initial_bind && !xe_vm_in_fault_mode(vm))
> -		return 0;
> -
>  	if (!mmu_interval_read_retry(&uvma->userptr.notifier,
>  				     notifier_seq) &&
>  	    !xe_pt_userptr_inject_eagain(uvma))
> @@ -1226,6 +1223,8 @@ static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma,
>  	if (xe_vm_in_fault_mode(vm)) {
>  		return -EAGAIN;
>  	} else {
> +		long err;
> +
>  		spin_lock(&vm->userptr.invalidated_lock);
>  		list_move_tail(&uvma->userptr.invalidate_link,
>  			       &vm->userptr.invalidated);
> @@ -1234,19 +1233,23 @@ static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma,
>  		if (xe_vm_in_preempt_fence_mode(vm)) {
>  			struct dma_resv_iter cursor;
>  			struct dma_fence *fence;
> -			long err;
>  
>  			dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
>  					    DMA_RESV_USAGE_BOOKKEEP);
>  			dma_resv_for_each_fence_unlocked(&cursor, fence)
>  				dma_fence_enable_sw_signaling(fence);
>  			dma_resv_iter_end(&cursor);
> -
> -			err = dma_resv_wait_timeout(xe_vm_resv(vm),
> -						    DMA_RESV_USAGE_BOOKKEEP,
> -						    false, MAX_SCHEDULE_TIMEOUT);
> -			XE_WARN_ON(err <= 0);
>  		}
> +
> +		/*
> +		 * We are temporarily installing PTEs pointing to invalidated
> +		 * pages; ensure the VM is idle to avoid data corruption. PTEs
> +		 * are fixed up on the next exec or in the rebind worker.
> +		 */
> +		err = dma_resv_wait_timeout(xe_vm_resv(vm),
> +					    DMA_RESV_USAGE_BOOKKEEP,
> +					    false, MAX_SCHEDULE_TIMEOUT);
> +		XE_WARN_ON(err <= 0);
>  	}
>  
>  	return 0;
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 996000f2424e..9b2acb069a77 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -623,8 +623,6 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
>  		spin_unlock(&vm->userptr.invalidated_lock);
>  	}
>  
> -	up_write(&vm->userptr.notifier_lock);
> -
>  	/*
>  	 * Preempt fences turn into schedule disables, pipeline these.
>  	 * Note that even in fault mode, we need to wait for binds and
> @@ -647,6 +645,8 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
>  		XE_WARN_ON(err);
>  	}
>  
> +	up_write(&vm->userptr.notifier_lock);
> +
>  	trace_xe_vma_userptr_invalidate_complete(vma);
>  
>  	return true;
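For readers skimming the hunks, the non-fault-mode branch of
vma_check_userptr() after this patch ends up looking roughly as below.
This is hand-assembled from the diff above rather than copied from the
applied tree, and the inline comments outside the patch's own comment
block are my reading of it:

	} else {
		long err;

		/* Make the racing invalidation visible to the next exec /
		 * rebind worker.
		 */
		spin_lock(&vm->userptr.invalidated_lock);
		list_move_tail(&uvma->userptr.invalidate_link,
			       &vm->userptr.invalidated);
		spin_unlock(&vm->userptr.invalidated_lock);

		if (xe_vm_in_preempt_fence_mode(vm)) {
			struct dma_resv_iter cursor;
			struct dma_fence *fence;

			/* Preempt fences turn into schedule disables; kick
			 * their signaling so the wait below can complete.
			 */
			dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
					    DMA_RESV_USAGE_BOOKKEEP);
			dma_resv_for_each_fence_unlocked(&cursor, fence)
				dma_fence_enable_sw_signaling(fence);
			dma_resv_iter_end(&cursor);
		}

		/*
		 * We are temporarily installing PTEs pointing to invalidated
		 * pages; ensure the VM is idle to avoid data corruption. PTEs
		 * are fixed up on the next exec or in the rebind worker.
		 */
		err = dma_resv_wait_timeout(xe_vm_resv(vm),
					    DMA_RESV_USAGE_BOOKKEEP,
					    false, MAX_SCHEDULE_TIMEOUT);
		XE_WARN_ON(err <= 0);
	}

	return 0;

That is, with the initial-bind early return dropped, a bind that loses
the race always parks the VMA on the invalidated list and idles the VM
on the bookkeep slots before its PTEs can ever be consumed.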


