[Intel-xe] [PATCH 1/5] drm/xe: Allow num_binds == 0 in VM bind IOCTL

Matthew Brost matthew.brost at intel.com
Mon Nov 20 12:37:31 UTC 2023


On Thu, Nov 16, 2023 at 04:11:40PM -0700, Zanoni, Paulo R wrote:
> On Thu, 2023-11-16 at 20:53 +0000, Souza, Jose wrote:
> > On Thu, 2023-11-16 at 11:40 -0800, Matthew Brost wrote:
> > > The idea being that out-syncs can signal, indicating that all previous
> > > operations on the bind queue are complete. An example use case of this would be
> > > support for implementing vkQueueWaitForIdle easily.
> > 
> > s/vkQueueWaitForIdle/vkQueueWaitIdle
> > 
> > So this is just for the bind queue.
> > Not sure if it is useful; Mesa will eventually move to async vm bind, and we will use a single TIMELINE_SYNCOBJ to track which bind operations have
> > completed.
> > Paulo do you see any use with sparse?
> 
> We don't have plans to submit stuff to the Kernel with num_binds=0, but
> we do need the Kernel to support the case where we're trying to bind
> something that's already bound (so there's nothing for the Kernel to
> do, but num_binds > 0).
>

This series also fixes an issue where num_binds > 0 but there is nothing to bind.
 
> And yes, to track operations that are already complete, it's usually
> better to just check the syncobj (timeline or not) of the last thing
> you submitted.
> 
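For reference, waiting on the last submitted point of that single
TIMELINE_SYNCOBJ is a one-call wait. A rough sketch with libdrm (not
from this thread; the helper name and the last_point bookkeeping are
hypothetical):

#include <stdint.h>
#include <xf86drm.h>

/*
 * Wait until the most recently submitted bind operation has completed,
 * given a timeline syncobj whose point is advanced per submission.
 */
static int wait_last_bind(int fd, uint32_t timeline_syncobj,
                          uint64_t last_point)
{
        /* Blocks until the fence at 'last_point' signals. */
        return drmSyncobjTimelineWait(fd, &timeline_syncobj, &last_point,
                                      1, INT64_MAX, 0, NULL);
}
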
> Currently, due to the xe.ko bugs, our vm_bind code is completely
> synchronous, but the plan is to move it to async like we're doing with
> TR-TT.
> 
> I'm not discarding the possibility that we may have to do something
> extra for vkQueueWaitIdle, especially after we make it async, but I
> don't think submitting num_binds=0 will be the right choice.
> 

I'd like to leave this option in the kernel, as it is pretty easy to
implement and matches the behavior of execs with num batches == 0.
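
To illustrate, a rough userspace sketch of the num_binds == 0 use case
(not part of this series; the helper name is made up, and the struct
and flag names follow the in-flight xe uapi, so they may not match the
final headers):

#include <stdint.h>
#include <xf86drm.h>
#include <drm/xe_drm.h>

/* Wait for all previously submitted ops on a bind queue to complete. */
static int bind_queue_wait_idle(int fd, uint32_t vm_id,
                                uint32_t exec_queue_id)
{
        uint32_t syncobj;
        int err;

        err = drmSyncobjCreate(fd, 0, &syncobj);
        if (err)
                return err;

        struct drm_xe_sync sync = {
                .flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL,
                .handle = syncobj,      /* out-sync to be signaled */
        };
        struct drm_xe_vm_bind bind = {
                .vm_id = vm_id,
                .exec_queue_id = exec_queue_id,
                .num_binds = 0,         /* no bind ops, syncs only */
                .num_syncs = 1,
                .syncs = (uintptr_t)&sync,
        };

        err = drmIoctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
        if (!err)
                /* Signals once all prior bind queue ops are done. */
                err = drmSyncobjWait(fd, &syncobj, 1, INT64_MAX, 0, NULL);

        drmSyncobjDestroy(fd, syncobj);
        return err;
}

This is the same shape as an exec submitted with zero batch buffers:
no work, just sync points.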

Matt

> > 
> > > 
> > > Signed-off-by: Matthew Brost <matthew.brost at intel.com>
> > > Reviewed-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
> > > ---
> > >  drivers/gpu/drm/xe/xe_vm.c | 30 ++++++++++++++++++------------
> > >  1 file changed, 18 insertions(+), 12 deletions(-)
> > > 
> > > diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> > > index d45f4f1d490f..cd8c2332a22e 100644
> > > --- a/drivers/gpu/drm/xe/xe_vm.c
> > > +++ b/drivers/gpu/drm/xe/xe_vm.c
> > > @@ -2850,7 +2850,6 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
> > >  	int i;
> > >  
> > >  	if (XE_IOCTL_DBG(xe, args->extensions) ||
> > > -	    XE_IOCTL_DBG(xe, !args->num_binds) ||
> > >  	    XE_IOCTL_DBG(xe, args->num_binds > MAX_BINDS))
> > >  		return -EINVAL;
> > >  
> > > @@ -2977,7 +2976,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> > >  			goto put_exec_queue;
> > >  		}
> > >  
> > > -		if (XE_IOCTL_DBG(xe, async !=
> > > +		if (XE_IOCTL_DBG(xe, args->num_binds && async !=
> > >  				 !!(q->flags & EXEC_QUEUE_FLAG_VM_ASYNC))) {
> > >  			err = -EINVAL;
> > >  			goto put_exec_queue;
> > > @@ -2991,7 +2990,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> > >  	}
> > >  
> > >  	if (!args->exec_queue_id) {
> > > -		if (XE_IOCTL_DBG(xe, async !=
> > > +		if (XE_IOCTL_DBG(xe, args->num_binds && async !=
> > >  				 !!(vm->flags & XE_VM_FLAG_ASYNC_DEFAULT))) {
> > >  			err = -EINVAL;
> > >  			goto put_vm;
> > > @@ -3028,16 +3027,18 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> > >  		}
> > >  	}
> > >  
> > > -	bos = kzalloc(sizeof(*bos) * args->num_binds, GFP_KERNEL);
> > > -	if (!bos) {
> > > -		err = -ENOMEM;
> > > -		goto release_vm_lock;
> > > -	}
> > > +	if (args->num_binds) {
> > > +		bos = kzalloc(sizeof(*bos) * args->num_binds, GFP_KERNEL);
> > > +		if (!bos) {
> > > +			err = -ENOMEM;
> > > +			goto release_vm_lock;
> > > +		}
> > >  
> > > -	ops = kzalloc(sizeof(*ops) * args->num_binds, GFP_KERNEL);
> > > -	if (!ops) {
> > > -		err = -ENOMEM;
> > > -		goto release_vm_lock;
> > > +		ops = kzalloc(sizeof(*ops) * args->num_binds, GFP_KERNEL);
> > > +		if (!ops) {
> > > +			err = -ENOMEM;
> > > +			goto release_vm_lock;
> > > +		}
> > >  	}
> > >  
> > >  	for (i = 0; i < args->num_binds; ++i) {
> > > @@ -3092,6 +3093,11 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> > >  			goto free_syncs;
> > >  	}
> > >  
> > > +	if (!args->num_binds) {
> > > +		err = -ENODATA;
> > > +		goto free_syncs;
> > > +	}
> > > +
> > >  	for (i = 0; i < args->num_binds; ++i) {
> > >  		u64 range = bind_ops[i].range;
> > >  		u64 addr = bind_ops[i].addr;
> > 
> 

