[PATCH 2/2] drm/xe: RFC Deny unbinds if uapi ufence pending

Mika Kuoppala mika.kuoppala at linux.intel.com
Wed Feb 14 14:15:58 UTC 2024


Mika Kuoppala <mika.kuoppala at linux.intel.com> writes:

> If a user fence was provided for MAP in vm_bind_ioctl
> and it has still not been signalled, deny UNMAP of said
> vma with EBUSY for as long as the unsignalled fence exists.
>
> This guarantees that MAP vs UNMAP sequences won't escape
> under the radar if we ever want to track the client's state
> wrt completed and accessible MAPs by intercepting the
> ufence release signalling.
>
> Link: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/1159
> Cc: Thomas Hellström <thomas.hellstrom at linux.intel.com>
> Cc: Matthew Brost <matthew.brost at intel.com>
> Cc: Joonas Lahtinen <joonas.lahtinen at linux.intel.com>
> Signed-off-by: Mika Kuoppala <mika.kuoppala at linux.intel.com>
> ---
>  drivers/gpu/drm/xe/xe_vm.c       | 22 ++++++++++++++++++++++
>  drivers/gpu/drm/xe/xe_vm_types.h |  7 +++++++
>  2 files changed, 29 insertions(+)
>
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 836a6e849cda8..c26297568e697 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -897,6 +897,11 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
>  	struct xe_device *xe = vm->xe;
>  	bool read_only = xe_vma_read_only(vma);
>  
> +	if (vma->ufence) {
> +		xe_sync_ufence_put(vma->ufence);
> +		vma->ufence = NULL;
> +	}
> +
>  	if (xe_vma_is_userptr(vma)) {
>  		struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
>  
> @@ -1608,6 +1613,16 @@ xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
>  
>  	trace_xe_vma_unbind(vma);
>  
> +	if (vma->ufence) {
> +		struct xe_user_fence * const f = vma->ufence;
> +
> +		if (!xe_sync_ufence_get_status(f))
> +			return ERR_PTR(-EBUSY);
> +
> +		vma->ufence = NULL;
> +		xe_sync_ufence_put(f);
> +	}
> +
>  	if (number_tiles > 1) {
>  		fences = kmalloc_array(number_tiles, sizeof(*fences),
>  				       GFP_KERNEL);
> @@ -1751,6 +1766,13 @@ static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
>  
>  	xe_vm_assert_held(vm);
>  
> +	if (num_syncs == 1 && xe_sync_is_ufence(&syncs[0])) {
> +		if (XE_WARN_ON(vma->ufence))

This will get triggered by xe_exec_threads: threads-cm-userptr.

Should we just silently swap in the newer provided ufence and only
complain if the existing fence here is still unsignalled?
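
Something along these lines (a rough, untested sketch; it reuses
xe_sync_ufence_get_status() from this patch and assumes it returns
non-zero once the fence has signalled):

	if (num_syncs == 1 && xe_sync_is_ufence(&syncs[0])) {
		struct xe_user_fence * const old = vma->ufence;

		if (old) {
			/* Only warn if the previous ufence is still pending */
			XE_WARN_ON(!xe_sync_ufence_get_status(old));
			xe_sync_ufence_put(old);
		}

		vma->ufence = xe_sync_ufence_get(&syncs[0]);
	}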

-Mika

> +			xe_sync_ufence_put(vma->ufence);
> +
> +		vma->ufence = xe_sync_ufence_get(&syncs[0]);
> +	}
> +
>  	if (immediate) {
>  		fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op,
>  				       last_op);
> diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
> index 5ac9c5bebabc3..4a06420b941ea 100644
> --- a/drivers/gpu/drm/xe/xe_vm_types.h
> +++ b/drivers/gpu/drm/xe/xe_vm_types.h
> @@ -19,6 +19,7 @@
>  
>  struct xe_bo;
>  struct xe_sync_entry;
> +struct xe_user_fence;
>  struct xe_vm;
>  
>  #define XE_VMA_READ_ONLY	DRM_GPUVA_USERBITS
> @@ -102,6 +103,12 @@ struct xe_vma {
>  	 * @pat_index: The pat index to use when encoding the PTEs for this vma.
>  	 */
>  	u16 pat_index;
> +
> +	/**
> +	 * @ufence: The user fence that was provided with MAP.
> +	 * Needs to be signalled before UNMAP can be processed.
> +	 */
> +	struct xe_user_fence *ufence;
>  };
>  
>  /**
> -- 
> 2.34.1