[PATCH v4 06/30] drm/xe: Simplify VM bind IOCTL error handling and cleanup

Zeng, Oak oak.zeng at intel.com
Mon Mar 25 16:03:27 UTC 2024


Hi Matt,

This looks like a nice clean up to me. See one comment inline.

> -----Original Message-----
> From: Intel-xe <intel-xe-bounces at lists.freedesktop.org> On Behalf Of Matthew Brost
> Sent: Friday, March 8, 2024 12:08 AM
> To: intel-xe at lists.freedesktop.org
> Cc: Brost, Matthew <matthew.brost at intel.com>
> Subject: [PATCH v4 06/30] drm/xe: Simplify VM bind IOCTL error handling and cleanup
> 
> Clean up everything in VM bind IOCTL in 1 path for both errors and
> non-errors. Also move VM bind IOCTL cleanup from ops (also used by
> non-IOCTL binds) to the VM bind IOCTL.
> 
> Signed-off-by: Matthew Brost <matthew.brost at intel.com>
> ---
>  drivers/gpu/drm/xe/xe_vm.c       | 60 +++++---------------------------
>  drivers/gpu/drm/xe/xe_vm_types.h |  5 ---
>  2 files changed, 9 insertions(+), 56 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 0bb807c05d7b..dde777c807cf 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -769,8 +769,7 @@ static void xe_vm_populate_dummy_rebind(struct xe_vm *vm, struct xe_vma *vma)
>  }
> 
>  static struct dma_fence *ops_execute(struct xe_vm *vm,
> -				     struct xe_vma_ops *vops,
> -				     bool cleanup);
> +				     struct xe_vma_ops *vops);
> 
>  struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
>  {
> @@ -794,7 +793,7 @@ struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
>  			trace_xe_vma_rebind_exec(vma);
> 
>  		xe_vm_populate_dummy_rebind(vm, vma);
> -		fence = ops_execute(vm, &vm->dummy_ops.vops, false);
> +		fence = ops_execute(vm, &vm->dummy_ops.vops);
>  		if (IS_ERR(fence))
>  			return fence;
>  	}
> @@ -2474,7 +2473,6 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
>  	if (!last_op)
>  		return 0;
> 
> -	last_op->ops = ops;
>  	if (last) {
>  		last_op->flags |= XE_VMA_OP_LAST;
>  		last_op->num_syncs = num_syncs;
> @@ -2643,25 +2641,6 @@ xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
>  	return fence;
>  }
> 
> -static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op)
> -{
> -	bool last = op->flags & XE_VMA_OP_LAST;
> -
> -	if (last) {
> -		while (op->num_syncs--)
> -			xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
> -		kfree(op->syncs);
> -		if (op->q)
> -			xe_exec_queue_put(op->q);
> -	}
> -	if (!list_empty(&op->link))
> -		list_del(&op->link);
> -	if (op->ops)
> -		drm_gpuva_ops_free(&vm->gpuvm, op->ops);
> -	if (last)
> -		xe_vm_put(vm);
> -}
> -
>  static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
>  			     bool post_commit, bool prev_post_commit,
>  			     bool next_post_commit)
> @@ -2738,8 +2717,6 @@ static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
>  					 op->flags & XE_VMA_OP_PREV_COMMITTED,
>  					 op->flags & XE_VMA_OP_NEXT_COMMITTED);
>  		}
> -
> -		drm_gpuva_ops_free(&vm->gpuvm, __ops);
>  	}
>  }
> 
> @@ -2818,8 +2795,7 @@ static int vm_bind_ioctl_ops_lock(struct drm_exec *exec,
>  }
> 
>  static struct dma_fence *ops_execute(struct xe_vm *vm,
> -				     struct xe_vma_ops *vops,
> -				     bool cleanup)
> +				     struct xe_vma_ops *vops)
>  {
>  	struct xe_vma_op *op, *next;
>  	struct dma_fence *fence = NULL;
> @@ -2834,8 +2810,6 @@ static struct dma_fence *ops_execute(struct xe_vm *vm,
>  				 op->base.op, PTR_ERR(fence));
>  			fence = ERR_PTR(-ENOSPC);
>  		}
> -		if (cleanup)
> -			xe_vma_op_cleanup(vm, op);


Now that this cleanup code is removed, do you still want to loop over all the ops in the list when xe_vma_op_execute() fails with an error? Should we break out and return early in that case?
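
Something like the below is what I have in mind (an untested sketch; I'm assuming the loop is the existing list_for_each_entry_safe() walk over vops->list and am paraphrasing the drm_warn message from the context lines above):

	list_for_each_entry_safe(op, next, &vops->list, link) {
		fence = xe_vma_op_execute(vm, op);
		if (IS_ERR(fence)) {
			drm_warn(&vm->xe->drm, "VM op(%d) failed with %ld",
				 op->base.op, PTR_ERR(fence));
			/*
			 * Bail out on the first failure instead of
			 * running the remaining ops through
			 * xe_vma_op_execute().
			 */
			return ERR_PTR(-ENOSPC);
		}
	}

	return fence;

Returning on the first failure would also avoid emitting one warning per remaining op once the bind has already failed.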

Oak

>  	}
> 
>  	return fence;
> @@ -2858,7 +2832,7 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
>  		if (err)
>  			goto unlock;
> 
> -		fence = ops_execute(vm, vops, true);
> +		fence = ops_execute(vm, vops);
>  		if (IS_ERR(fence)) {
>  			err = PTR_ERR(fence);
>  			/* FIXME: Killing VM rather than proper error handling */
> @@ -3211,30 +3185,14 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
>  		goto unwind_ops;
>  	}
> 
> -	xe_vm_get(vm);
> -	if (q)
> -		xe_exec_queue_get(q);
> -
>  	err = vm_bind_ioctl_ops_execute(vm, &vops);
> 
> -	up_write(&vm->lock);
> -
> -	if (q)
> -		xe_exec_queue_put(q);
> -	xe_vm_put(vm);
> -
> -	for (i = 0; bos && i < args->num_binds; ++i)
> -		xe_bo_put(bos[i]);
> -
> -	kvfree(bos);
> -	kvfree(ops);
> -	if (args->num_binds > 1)
> -		kvfree(bind_ops);
> -
> -	return err;
> -
>  unwind_ops:
> -	vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
> +	if (err && err != -ENODATA)
> +		vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
> +	for (i = args->num_binds - 1; i >= 0; --i)
> +		if (ops[i])
> +			drm_gpuva_ops_free(&vm->gpuvm, ops[i]);
>  free_syncs:
>  	if (err == -ENODATA)
>  		err = vm_bind_ioctl_signal_fences(vm, q, syncs, num_syncs);
> diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
> index 7ef9e632154a..f097fe318a74 100644
> --- a/drivers/gpu/drm/xe/xe_vm_types.h
> +++ b/drivers/gpu/drm/xe/xe_vm_types.h
> @@ -181,11 +181,6 @@ enum xe_vma_op_flags {
>  struct xe_vma_op {
>  	/** @base: GPUVA base operation */
>  	struct drm_gpuva_op base;
> -	/**
> -	 * @ops: GPUVA ops, when set call drm_gpuva_ops_free after this
> -	 * operations is processed
> -	 */
> -	struct drm_gpuva_ops *ops;
>  	/** @q: exec queue for this operation */
>  	struct xe_exec_queue *q;
>  	/**
> --
> 2.34.1


