[Intel-xe] [PATCH 5/6] drm/xe: Allow num_binds == 0 in VM bind IOCTL

Thomas Hellström thomas.hellstrom at linux.intel.com
Thu Sep 21 09:32:17 UTC 2023


Hi, Matt!

On Thu, 2023-09-14 at 13:40 -0700, Matthew Brost wrote:
> The idea being that out-syncs can signal, indicating that all
> previous operations on the bind queue are complete. An example use
> case of this would be easily implementing vkQueueWaitForIdle.
> 
> Signed-off-by: Matthew Brost <matthew.brost at intel.com>

One question below.
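For reference, this is roughly the userspace pattern I would expect this to
enable. Treat it as a sketch only: the struct and flag names (drm_xe_vm_bind,
drm_xe_sync, DRM_XE_SYNC_SYNCOBJ, DRM_XE_SYNC_SIGNAL) follow the uAPI header
as it looks around this series and are still in flux, and
bind_queue_wait_idle() is just a made-up helper name.

/* Hypothetical sketch: "wait for bind queue idle" via a zero-bind VM_BIND
 * whose only purpose is to signal an out-sync once all previously submitted
 * binds on the queue have completed.
 */
#include <stdint.h>
#include <xf86drm.h>
#include <drm/xe_drm.h>

static int bind_queue_wait_idle(int fd, uint32_t vm_id, uint32_t exec_queue_id)
{
	uint32_t syncobj;
	int err;

	/* Syncobj that the zero-bind operation will signal as its out-sync. */
	err = drmSyncobjCreate(fd, 0, &syncobj);
	if (err)
		return err;

	struct drm_xe_sync sync = {
		.flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL,
		.handle = syncobj,
	};

	/* num_binds == 0: no new mappings, only an out-sync ordered after
	 * all previous operations on this bind queue.
	 */
	struct drm_xe_vm_bind bind = {
		.vm_id = vm_id,
		.exec_queue_id = exec_queue_id,
		.num_binds = 0,
		.num_syncs = 1,
		.syncs = (uintptr_t)&sync,
	};

	err = drmIoctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
	if (!err)
		/* Blocks until every earlier bind on the queue is done. */
		err = drmSyncobjWait(fd, &syncobj, 1, INT64_MAX, 0, NULL);

	drmSyncobjDestroy(fd, syncobj);
	return err;
}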

> ---
>  drivers/gpu/drm/xe/xe_vm.c | 30 ++++++++++++++++++------------
>  1 file changed, 18 insertions(+), 12 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 49c745d53b41..0e2f3ab453ea 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -2678,7 +2678,6 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
>         int i;
>  
>         if (XE_IOCTL_DBG(xe, args->extensions) ||
> -           XE_IOCTL_DBG(xe, !args->num_binds) ||
>             XE_IOCTL_DBG(xe, args->num_binds > MAX_BINDS))
>                 return -EINVAL;
>  
> @@ -2805,7 +2804,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
>                         goto put_exec_queue;
>                 }
>  
> -               if (XE_IOCTL_DBG(xe, async !=
> +               if (XE_IOCTL_DBG(xe, args->num_binds && async !=
>                                  !!(q->flags & EXEC_QUEUE_FLAG_VM_ASYNC))) {
>                         err = -EINVAL;
>                         goto put_exec_queue;
> @@ -2819,7 +2818,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
>         }
>  
>         if (!args->exec_queue_id) {
> -               if (XE_IOCTL_DBG(xe, async !=
> +               if (XE_IOCTL_DBG(xe, args->num_binds && async !=
>                                  !!(vm->flags & XE_VM_FLAG_ASYNC_DEFAULT))) {
>                         err = -EINVAL;
>                         goto put_vm;
> @@ -2856,16 +2855,18 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
>                 }
>         }
>  
> -       bos = kzalloc(sizeof(*bos) * args->num_binds, GFP_KERNEL);
> -       if (!bos) {
> -               err = -ENOMEM;
> -               goto release_vm_lock;
> -       }
> +       if (args->num_binds) {
> +               bos = kzalloc(sizeof(*bos) * args->num_binds, GFP_KERNEL);
> +               if (!bos) {
> +                       err = -ENOMEM;
> +                       goto release_vm_lock;
> +               }
>  
> -       ops = kzalloc(sizeof(*ops) * args->num_binds, GFP_KERNEL);
> -       if (!ops) {
> -               err = -ENOMEM;
> -               goto release_vm_lock;
> +               ops = kzalloc(sizeof(*ops) * args->num_binds, GFP_KERNEL);
> +               if (!ops) {
> +                       err = -ENOMEM;
> +                       goto release_vm_lock;
> +               }
>         }
>  
>         for (i = 0; i < args->num_binds; ++i) {
> @@ -2920,6 +2921,11 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
>                         goto free_syncs;
>         }
>  
> +       if (!args->num_binds) {
> +               err = -ENODATA;
> +               goto free_syncs;
> +       }
> +

Hmm. Here it appears we reject num_binds == 0? 

>         for (i = 0; i < args->num_binds; ++i) {
>                 u64 range = bind_ops[i].range;
>                 u64 addr = bind_ops[i].addr;


