[PATCH 06/13] drm/xe: Simplify VM bind IOCTL error handling and cleanup

Zeng, Oak oak.zeng at intel.com
Tue Apr 23 03:22:03 UTC 2024



> -----Original Message-----
> From: Brost, Matthew <matthew.brost at intel.com>
> Sent: Friday, April 19, 2024 3:16 PM
> To: Zeng, Oak <oak.zeng at intel.com>
> Cc: intel-xe at lists.freedesktop.org
> Subject: Re: [PATCH 06/13] drm/xe: Simplify VM bind IOCTL error handling and cleanup
> 
> On Thu, Apr 18, 2024 at 10:19:04PM -0600, Zeng, Oak wrote:
> > It is a nice cleanup. See one question inline.
> >
> > > -----Original Message-----
> > > From: Brost, Matthew <matthew.brost at intel.com>
> > > Sent: Wednesday, April 10, 2024 1:41 AM
> > > To: intel-xe at lists.freedesktop.org
> > > Cc: Brost, Matthew <matthew.brost at intel.com>; Zeng, Oak
> > > <oak.zeng at intel.com>
> > > Subject: [PATCH 06/13] drm/xe: Simplify VM bind IOCTL error handling and cleanup
> > >
> > > Clean up everything in the VM bind IOCTL in one path for both the error
> > > and non-error cases. Also move the VM bind IOCTL cleanup out of the ops
> > > (which are also used by non-IOCTL binds) and into the VM bind IOCTL itself.
> > >
> > > v2:
> > >  - Break ops_execute on error (Oak)
> > >
> > > Cc: Oak Zeng <oak.zeng at intel.com>
> > > Signed-off-by: Matthew Brost <matthew.brost at intel.com>
> > > ---
> > >  drivers/gpu/drm/xe/xe_vm.c       | 67 ++++++--------------------------
> > >  drivers/gpu/drm/xe/xe_vm_types.h |  5 ---
> > >  2 files changed, 12 insertions(+), 60 deletions(-)
> > >
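
In case it helps other readers of the archive: the shape this patch converges
on is the standard single-exit-path kernel style, where the success and error
cases share one cleanup tail. A minimal sketch of that flow (the helper names
below are hypothetical stand-ins, not the actual xe functions):

	static int bind_ioctl_sketch(struct xe_vm *vm, struct drm_gpuva_ops **ops,
				     int num_binds)
	{
		int i, err;

		err = parse_and_commit_ops(vm, ops, num_binds);	/* hypothetical */
		if (err)
			goto unwind_ops;

		err = execute_ops(vm);	/* hypothetical; success falls through too */

	unwind_ops:
		if (err && err != -ENODATA)
			unwind_committed_ops(vm);	/* hypothetical, errors only */
		/* the GPUVA ops are freed in exactly one place now */
		for (i = num_binds - 1; i >= 0; --i)
			if (ops[i])
				drm_gpuva_ops_free(&vm->gpuvm, ops[i]);
		return err;
	}

(As I read the later hunk, -ENODATA means "nothing to do", so it skips the
unwind and only signals the fences.)
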
> > > diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> > > index 9d82396cf5d5..8f5b24c8f6cd 100644
> > > --- a/drivers/gpu/drm/xe/xe_vm.c
> > > +++ b/drivers/gpu/drm/xe/xe_vm.c
> > > @@ -842,8 +842,7 @@ static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma,
> > >  }
> > >
> > >  static struct dma_fence *ops_execute(struct xe_vm *vm,
> > > -				     struct xe_vma_ops *vops,
> > > -				     bool cleanup);
> > > +				     struct xe_vma_ops *vops);
> > >  static void xe_vma_ops_init(struct xe_vma_ops *vops);
> > >
> > >  int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
> > > @@ -876,7 +875,7 @@ int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
> > >  			goto free_ops;
> > >  	}
> > >
> > > -	fence = ops_execute(vm, &vops, false);
> > > +	fence = ops_execute(vm, &vops);
> > >  	if (IS_ERR(fence)) {
> > >  		err = PTR_ERR(fence);
> > >  	} else {
> > > @@ -2551,7 +2550,6 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
> > >  	if (!last_op)
> > >  		return 0;
> > >
> > > -	last_op->ops = ops;
> > >  	if (last) {
> > >  		last_op->flags |= XE_VMA_OP_LAST;
> > >  		last_op->num_syncs = num_syncs;
> > > @@ -2721,25 +2719,6 @@ xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
> > >  	return fence;
> > >  }
> > >
> > > -static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op)
> > > -{
> > > -	bool last = op->flags & XE_VMA_OP_LAST;
> > > -
> > > -	if (last) {
> > > -		while (op->num_syncs--)
> > > -			xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
> >
> > I understand all the other parts of this function are not needed anymore,
> > but I couldn't figure out why sync_entry_cleanup is not needed. You still
> > have syncs, don't you? They are allocated in the bind_ioctl function, and
> > it seems you didn't touch that in this patch... Can you explain?
> >
> 
> It is called in the main IOCTL code (xe_vm_bind_ioctl) now.
> 
> See below.
> 
> > Oak
> >
> >
> > > -		kfree(op->syncs);
> > > -		if (op->q)
> > > -			xe_exec_queue_put(op->q);
> > > -	}
> > > -	if (!list_empty(&op->link))
> > > -		list_del(&op->link);
> > > -	if (op->ops)
> > > -		drm_gpuva_ops_free(&vm->gpuvm, op->ops);
> > > -	if (last)
> > > -		xe_vm_put(vm);
> > > -}
> > > -
> > >  static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
> > >  			     bool post_commit, bool prev_post_commit,
> > >  			     bool next_post_commit)
> > > @@ -2816,8 +2795,6 @@ static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
> > >  					 op->flags & XE_VMA_OP_PREV_COMMITTED,
> > >  					 op->flags & XE_VMA_OP_NEXT_COMMITTED);
> > >  		}
> > > -
> > > -		drm_gpuva_ops_free(&vm->gpuvm, __ops);
> > >  	}
> > >  }
> > >
> > > @@ -2904,24 +2881,20 @@ static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec,
> > >  }
> > >
> > >  static struct dma_fence *ops_execute(struct xe_vm *vm,
> > > -				     struct xe_vma_ops *vops,
> > > -				     bool cleanup)
> > > +				     struct xe_vma_ops *vops)
> > >  {
> > >  	struct xe_vma_op *op, *next;
> > >  	struct dma_fence *fence = NULL;
> > >
> > >  	list_for_each_entry_safe(op, next, &vops->list, link) {
> > > -		if (!IS_ERR(fence)) {
> > > -			dma_fence_put(fence);
> > > -			fence = xe_vma_op_execute(vm, op);
> > > -		}
> > > +		dma_fence_put(fence);
> > > +		fence = xe_vma_op_execute(vm, op);
> > >  		if (IS_ERR(fence)) {
> > >  			drm_warn(&vm->xe->drm, "VM op(%d) failed with %ld",
> > >  				 op->base.op, PTR_ERR(fence));
> > >  			fence = ERR_PTR(-ENOSPC);
> > > +			break;
> > >  		}
> > > -		if (cleanup)
> > > -			xe_vma_op_cleanup(vm, op);
> > >  	}
> > >
> > >  	return fence;
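
(Side note for the archive, since the fence dance here can read oddly:
dma_fence_put() accepts NULL, so the put on the first iteration is harmless.
A simplified restatement of the loop with comments added; the drm_warn()
and the remap to -ENOSPC are omitted:

	struct dma_fence *fence = NULL;

	list_for_each_entry_safe(op, next, &vops->list, link) {
		dma_fence_put(fence);	/* drop the previous op's fence, NULL-safe */
		fence = xe_vma_op_execute(vm, op);
		if (IS_ERR(fence))
			break;		/* stop executing on the first failure */
	}
	/* success: the final op's fence; failure: an ERR_PTR */
	return fence;

The caller therefore gets either the last fence or an error, never a stale
intermediate fence.)
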
> > > @@ -2944,7 +2917,7 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
> > >  		if (err)
> > >  			goto unlock;
> > >
> > > -		fence = ops_execute(vm, vops, true);
> > > +		fence = ops_execute(vm, vops);
> > >  		if (IS_ERR(fence)) {
> > >  			err = PTR_ERR(fence);
> > >  			/* FIXME: Killing VM rather than proper error handling */
> > > @@ -3305,30 +3278,14 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> > >  		goto unwind_ops;
> > >  	}
> > >
> > > -	xe_vm_get(vm);
> > > -	if (q)
> > > -		xe_exec_queue_get(q);
> > > -
> > >  	err = vm_bind_ioctl_ops_execute(vm, &vops);
> > >
> > > -	up_write(&vm->lock);
> > > -
> > > -	if (q)
> > > -		xe_exec_queue_put(q);
> > > -	xe_vm_put(vm);
> > > -
> > > -	for (i = 0; bos && i < args->num_binds; ++i)
> > > -		xe_bo_put(bos[i]);
> > > -
> > > -	kvfree(bos);
> > > -	kvfree(ops);
> > > -	if (args->num_binds > 1)
> > > -		kvfree(bind_ops);
> > > -
> > > -	return err;
> > > -
> 
> We now fall through to the cleanup in both the success and error paths...
> 
> > >  unwind_ops:
> > > -	vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
> > > +	if (err && err != -ENODATA)
> > > +		vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
> > > +	for (i = args->num_binds - 1; i >= 0; --i)
> > > +		if (ops[i])
> > > +			drm_gpuva_ops_free(&vm->gpuvm, ops[i]);
> > >  free_syncs:
> > >  	if (err == -ENODATA)
> > >  		err = vm_bind_ioctl_signal_fences(vm, q, syncs, num_syncs);
> 
> The next few lines of code call xe_sync_entry_cleanup.
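
For the archive, the tail being referred to presumably looks something like
this (a paraphrased sketch, not copied from the tree):

	free_syncs:
		if (err == -ENODATA)
			err = vm_bind_ioctl_signal_fences(vm, q, syncs, num_syncs);
		while (num_syncs--)
			xe_sync_entry_cleanup(&syncs[num_syncs]);

i.e. the sync entries allocated by the IOCTL are torn down in the IOCTL
itself, which is why the copy in xe_vma_op_cleanup() could go away.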


Got it. Patch is:

Reviewed-by: Oak Zeng <oak.zeng at intel.com>
> 
> Matt
> 
> > > diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
> > > index 466b6c62d1f9..149ab892967e 100644
> > > --- a/drivers/gpu/drm/xe/xe_vm_types.h
> > > +++ b/drivers/gpu/drm/xe/xe_vm_types.h
> > > @@ -330,11 +330,6 @@ enum xe_vma_op_flags {
> > >  struct xe_vma_op {
> > >  	/** @base: GPUVA base operation */
> > >  	struct drm_gpuva_op base;
> > > -	/**
> > > -	 * @ops: GPUVA ops, when set call drm_gpuva_ops_free after this
> > > -	 * operations is processed
> > > -	 */
> > > -	struct drm_gpuva_ops *ops;
> > >  	/** @q: exec queue for this operation */
> > >  	struct xe_exec_queue *q;
> > >  	/**
> > > --
> > > 2.34.1
> >

