[Intel-xe] [PATCH 2/6] drm/xe/uapi: Kill DRM_XE_UFENCE_WAIT_VM_ERROR

Thomas Hellström thomas.hellstrom at linux.intel.com
Thu Sep 21 08:57:46 UTC 2023


On Thu, 2023-09-14 at 13:40 -0700, Matthew Brost wrote:
> This is not used, nor does it align with the VM async document; kill it.
> 
> Signed-off-by: Matthew Brost <matthew.brost at intel.com>

Reviewed-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>


> ---
>  drivers/gpu/drm/xe/xe_vm.c              |  3 --
>  drivers/gpu/drm/xe/xe_vm_types.h        | 11 -------
>  drivers/gpu/drm/xe/xe_wait_user_fence.c | 43 +++--------------------
> --
>  include/uapi/drm/xe_drm.h               | 17 +++-------
>  4 files changed, 9 insertions(+), 65 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 2a69302304e2..ea1f089549b1 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -1455,9 +1455,6 @@ void xe_vm_close_and_put(struct xe_vm *vm)
>                 xe_vma_destroy_unlocked(vma);
>         }
>  
> -       if (vm->async_ops.error_capture.addr)
> -               wake_up_all(&vm->async_ops.error_capture.wq);
> -
>         xe_assert(xe, list_empty(&vm->extobj.list));
>         up_write(&vm->lock);
>  
> diff --git a/drivers/gpu/drm/xe/xe_vm_types.h
> b/drivers/gpu/drm/xe/xe_vm_types.h
> index 52e5eaed91c3..3dc3da1386cb 100644
> --- a/drivers/gpu/drm/xe/xe_vm_types.h
> +++ b/drivers/gpu/drm/xe/xe_vm_types.h
> @@ -222,17 +222,6 @@ struct xe_vm {
>                 struct work_struct work;
>                 /** @lock: protects list of pending async VM ops and
> fences */
>                 spinlock_t lock;
> -               /** @error_capture: error capture state */
> -               struct {
> -                       /** @mm: user MM */
> -                       struct mm_struct *mm;
> -                       /**
> -                        * @addr: user pointer to copy error capture
> state too
> -                        */
> -                       u64 addr;
> -                       /** @wq: user fence wait queue for VM errors
> */
> -                       wait_queue_head_t wq;
> -               } error_capture;
>                 /** @fence: fence state */
>                 struct {
>                         /** @context: context of async fence */
> diff --git a/drivers/gpu/drm/xe/xe_wait_user_fence.c
> b/drivers/gpu/drm/xe/xe_wait_user_fence.c
> index 761eed3a022f..b47e9464c115 100644
> --- a/drivers/gpu/drm/xe/xe_wait_user_fence.c
> +++ b/drivers/gpu/drm/xe/xe_wait_user_fence.c
> @@ -13,7 +13,6 @@
>  #include "xe_device.h"
>  #include "xe_gt.h"
>  #include "xe_macros.h"
> -#include "xe_vm.h"
>  
>  static int do_compare(u64 addr, u64 value, u64 mask, u16 op)
>  {
> @@ -81,8 +80,7 @@ static int check_hw_engines(struct xe_device *xe,
>  }
>  
>  #define VALID_FLAGS    (DRM_XE_UFENCE_WAIT_SOFT_OP | \
> -                        DRM_XE_UFENCE_WAIT_ABSTIME | \
> -                        DRM_XE_UFENCE_WAIT_VM_ERROR)
> +                        DRM_XE_UFENCE_WAIT_ABSTIME)
>  #define MAX_OP         DRM_XE_UFENCE_WAIT_LTE
>  
>  static unsigned long to_jiffies_timeout(struct
> drm_xe_wait_user_fence *args)
> @@ -109,11 +107,9 @@ int xe_wait_user_fence_ioctl(struct drm_device
> *dev, void *data,
>         struct drm_xe_engine_class_instance
> eci[XE_HW_ENGINE_MAX_INSTANCE];
>         struct drm_xe_engine_class_instance __user *user_eci =
>                 u64_to_user_ptr(args->instances);
> -       struct xe_vm *vm = NULL;
>         u64 addr = args->addr;
>         int err;
> -       bool no_engines = args->flags & DRM_XE_UFENCE_WAIT_SOFT_OP ||
> -               args->flags & DRM_XE_UFENCE_WAIT_VM_ERROR;
> +       bool no_engines = args->flags & DRM_XE_UFENCE_WAIT_SOFT_OP;
>         unsigned long timeout;
>         ktime_t start;
>  
> @@ -134,8 +130,7 @@ int xe_wait_user_fence_ioctl(struct drm_device
> *dev, void *data,
>         if (XE_IOCTL_DBG(xe, !no_engines && !args->num_engines))
>                 return -EINVAL;
>  
> -       if (XE_IOCTL_DBG(xe, !(args->flags &
> DRM_XE_UFENCE_WAIT_VM_ERROR) &&
> -                        addr & 0x7))
> +       if (XE_IOCTL_DBG(xe, addr & 0x7))
>                 return -EINVAL;
>  
>         if (XE_IOCTL_DBG(xe, args->num_engines >
> XE_HW_ENGINE_MAX_INSTANCE))
> @@ -153,22 +148,6 @@ int xe_wait_user_fence_ioctl(struct drm_device
> *dev, void *data,
>                         return -EINVAL;
>         }
>  
> -       if (args->flags & DRM_XE_UFENCE_WAIT_VM_ERROR) {
> -               if (XE_IOCTL_DBG(xe, args->vm_id >> 32))
> -                       return -EINVAL;
> -
> -               vm = xe_vm_lookup(to_xe_file(file), args->vm_id);
> -               if (XE_IOCTL_DBG(xe, !vm))
> -                       return -ENOENT;
> -
> -               if (XE_IOCTL_DBG(xe, !vm-
> >async_ops.error_capture.addr)) {
> -                       xe_vm_put(vm);
> -                       return -EOPNOTSUPP;
> -               }
> -
> -               addr = vm->async_ops.error_capture.addr;
> -       }
> -
>         /*
>          * For negative timeout we want to wait "forever" by setting
>          * MAX_SCHEDULE_TIMEOUT. But we have to assign this value
> also
> @@ -188,15 +167,8 @@ int xe_wait_user_fence_ioctl(struct drm_device
> *dev, void *data,
>          * hardware engine. Open coding as 'do_compare' can sleep
> which doesn't
>          * work with the wait_event_* macros.
>          */
> -       if (vm)
> -               add_wait_queue(&vm->async_ops.error_capture.wq,
> &w_wait);
> -       else
> -               add_wait_queue(&xe->ufence_wq, &w_wait);
> +       add_wait_queue(&xe->ufence_wq, &w_wait);
>         for (;;) {
> -               if (vm && xe_vm_is_closed(vm)) {
> -                       err = -ENODEV;
> -                       break;
> -               }
>                 err = do_compare(addr, args->value, args->mask, args-
> >op);
>                 if (err <= 0)
>                         break;
> @@ -213,12 +185,7 @@ int xe_wait_user_fence_ioctl(struct drm_device
> *dev, void *data,
>  
>                 timeout = wait_woken(&w_wait, TASK_INTERRUPTIBLE,
> timeout);
>         }
> -       if (vm) {
> -               remove_wait_queue(&vm->async_ops.error_capture.wq,
> &w_wait);
> -               xe_vm_put(vm);
> -       } else {
> -               remove_wait_queue(&xe->ufence_wq, &w_wait);
> -       }
> +       remove_wait_queue(&xe->ufence_wq, &w_wait);
>  
>         if (!(args->flags & DRM_XE_UFENCE_WAIT_ABSTIME)) {
>                 args->timeout -= ktime_to_ns(ktime_sub(ktime_get(),
> start));
> diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
> index 5cbbb433ce68..d0259865717a 100644
> --- a/include/uapi/drm/xe_drm.h
> +++ b/include/uapi/drm/xe_drm.h
> @@ -873,18 +873,10 @@ struct drm_xe_wait_user_fence {
>         /** @extensions: Pointer to the first extension struct, if
> any */
>         __u64 extensions;
>  
> -       union {
> -               /**
> -                * @addr: user pointer address to wait on, must qword
> aligned
> -                */
> -               __u64 addr;
> -
> -               /**
> -                * @vm_id: The ID of the VM which encounter an error
> used with
> -                * DRM_XE_UFENCE_WAIT_VM_ERROR. Upper 32 bits must be
> clear.
> -                */
> -               __u64 vm_id;
> -       };
> +       /**
> +        * @addr: user pointer address to wait on, must qword aligned
> +        */
> +       __u64 addr;
>  
>  #define DRM_XE_UFENCE_WAIT_EQ  0
>  #define DRM_XE_UFENCE_WAIT_NEQ 1
> @@ -897,7 +889,6 @@ struct drm_xe_wait_user_fence {
>  
>  #define DRM_XE_UFENCE_WAIT_SOFT_OP     (1 << 0)        /* e.g. Wait
> on VM bind */
>  #define DRM_XE_UFENCE_WAIT_ABSTIME     (1 << 1)
> -#define DRM_XE_UFENCE_WAIT_VM_ERROR    (1 << 2)
>         /** @flags: wait flags */
>         __u16 flags;
>  



More information about the Intel-xe mailing list