[PATCH 2/2] drm/xe: Deny unbinds if uapi ufence pending
Matthew Brost
matthew.brost at intel.com
Thu Feb 15 17:21:04 UTC 2024
On Thu, Feb 15, 2024 at 06:40:21PM +0200, Mika Kuoppala wrote:
> If user fence was provided for MAP in vm_bind_ioctl
> and it has still not been signalled, deny UNMAP of said
> vma with EBUSY as long as unsignalled fence exists.
>
> This guarantees that MAP vs UNMAP sequences won't
> escape under the radar if we ever want to track the
> client's state wrt completed and accessible MAPs,
> by means of intercepting the ufence release signalling.
>
> v2: find ufence with num_fences > 1 (Matt)
>
> Link: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/1159
> Cc: Thomas Hellström <thomas.hellstrom at linux.intel.com>
> Cc: Matthew Brost <matthew.brost at intel.com>
> Cc: Joonas Lahtinen <joonas.lahtinen at linux.intel.com>
> Signed-off-by: Mika Kuoppala <mika.kuoppala at linux.intel.com>
> ---
> drivers/gpu/drm/xe/xe_vm.c | 35 ++++++++++++++++++++++++++++++++
> drivers/gpu/drm/xe/xe_vm_types.h | 7 +++++++
> 2 files changed, 42 insertions(+)
>
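Seen from userspace, the behaviour described above would look roughly
like the following (an illustrative sketch only; vm_bind_map(),
vm_bind_unmap() and wait_ufence() are hypothetical wrappers around
DRM_IOCTL_XE_VM_BIND and the user-fence sync, not something this patch
adds):

	#include <assert.h>
	#include <errno.h>
	#include <stdint.h>

	/* Hypothetical helpers wrapping DRM_IOCTL_XE_VM_BIND. */
	int vm_bind_map(int fd, uint32_t vm_id, uint64_t addr, uint64_t range,
			uint64_t *ufence_addr, uint64_t ufence_value);
	int vm_bind_unmap(int fd, uint32_t vm_id, uint64_t addr, uint64_t range);
	void wait_ufence(volatile uint64_t *ufence_addr, uint64_t ufence_value);

	static void map_unmap_example(int fd, uint32_t vm_id)
	{
		uint64_t ufence = 0;
		int ret;

		/* MAP with a user fence attached to the bind. */
		ret = vm_bind_map(fd, vm_id, 0x100000, 0x10000, &ufence, 1);
		assert(ret == 0);

		/* UNMAP while that fence is still unsignalled is refused. */
		ret = vm_bind_unmap(fd, vm_id, 0x100000, 0x10000);
		assert(ret == -1 && errno == EBUSY);

		/* Once the kernel has written the fence value... */
		wait_ufence(&ufence, 1);

		/* ...the UNMAP goes through. */
		ret = vm_bind_unmap(fd, vm_id, 0x100000, 0x10000);
		assert(ret == 0);
	}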
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 836a6e849cda8..e9559aa80dc8b 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -897,6 +897,11 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
> struct xe_device *xe = vm->xe;
> bool read_only = xe_vma_read_only(vma);
>
> + if (vma->ufence) {
> + xe_sync_ufence_put(vma->ufence);
> + vma->ufence = NULL;
> + }
> +
> if (xe_vma_is_userptr(vma)) {
> struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
>
> @@ -1608,6 +1613,16 @@ xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
>
> trace_xe_vma_unbind(vma);
>
> + if (vma->ufence) {
> + struct xe_user_fence * const f = vma->ufence;
> +
> + if (!xe_sync_ufence_get_status(f))
> + return ERR_PTR(-EBUSY);
> +
> + vma->ufence = NULL;
> + xe_sync_ufence_put(f);
> + }
> +
> if (number_tiles > 1) {
> fences = kmalloc_array(number_tiles, sizeof(*fences),
> GFP_KERNEL);
> @@ -1741,6 +1756,21 @@ xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
> return ERR_PTR(err);
> }
>
> +static struct xe_user_fence *
> +find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
> +{
> + unsigned int i;
> +
> + for (i = 0; i < num_syncs; i++) {
> + struct xe_sync_entry *e = &syncs[i];
> +
> + if (xe_sync_is_ufence(e))
> + return xe_sync_ufence_get(e);
> + }
> +
> + return NULL;
> +}
> +
> static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
> struct xe_exec_queue *q, struct xe_sync_entry *syncs,
> u32 num_syncs, bool immediate, bool first_op,
> @@ -1751,6 +1781,11 @@ static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
>
> xe_vm_assert_held(vm);
>
> + if (vma->ufence)
This path can be triggered by rebinds (in the exec path, the preempt
rebind worker, or page faults). Those paths can be triggered while a
bind is pending, they have no syncs, and we do not want to blow away an
existing vma->ufence.

How about something like this?
ufence = find_ufence_get(syncs, num_syncs);
if (vma->ufence && ufence)
	xe_sync_ufence_put(vma->ufence);
vma->ufence = ufence ?: vma->ufence;
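i.e. roughly something like this in __xe_vm_bind(), with a local
'ufence' variable declared next to 'fence' (just a sketch, untested):

	struct xe_user_fence *ufence;

	...

	xe_vm_assert_held(vm);

	/*
	 * Only replace (and drop the reference to) an existing
	 * vma->ufence when this bind actually carries a new user fence;
	 * rebinds with no syncs keep the ufence from the original MAP.
	 */
	ufence = find_ufence_get(syncs, num_syncs);
	if (vma->ufence && ufence)
		xe_sync_ufence_put(vma->ufence);
	vma->ufence = ufence ?: vma->ufence;

	if (immediate) {
		...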
Matt
> + xe_sync_ufence_put(vma->ufence);
> +
> + vma->ufence = find_ufence_get(syncs, num_syncs);
> +
> if (immediate) {
> fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op,
> last_op);
> diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
> index 5ac9c5bebabc3..4a06420b941ea 100644
> --- a/drivers/gpu/drm/xe/xe_vm_types.h
> +++ b/drivers/gpu/drm/xe/xe_vm_types.h
> @@ -19,6 +19,7 @@
>
> struct xe_bo;
> struct xe_sync_entry;
> +struct xe_user_fence;
> struct xe_vm;
>
> #define XE_VMA_READ_ONLY DRM_GPUVA_USERBITS
> @@ -102,6 +103,12 @@ struct xe_vma {
> * @pat_index: The pat index to use when encoding the PTEs for this vma.
> */
> u16 pat_index;
> +
> + /**
> + * @ufence: The user fence that was provided with MAP.
> + * Needs to be signalled before UNMAP can be processed.
> + */
> + struct xe_user_fence *ufence;
> };
>
> /**
> --
> 2.34.1
>