[PATCH 09/13] drm/xe: Add vm_bind_ioctl_ops_fini helper

Zeng, Oak oak.zeng at intel.com
Fri Apr 19 14:51:12 UTC 2024


This is a nice cleanup. Patch is:

Reviewed-by: Oak Zeng <oak.zeng at intel.com>
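
For anyone skimming the thread, the helper consolidates three things that were
previously scattered across the bind/unbind/prefetch paths: destroying VMAs for
unmap (and the unmap side of remap) ops, signaling the out-fences, and recording
the last fence on the wait exec queue. A rough annotated sketch of that flow,
based on the hunk below (not the verbatim patch; locking and error handling
omitted):

/* Sketch of the consolidated teardown done by vm_bind_ioctl_ops_fini() */
static void vm_bind_ioctl_ops_fini_sketch(struct xe_vm *vm,
					  struct xe_vma_ops *vops,
					  struct dma_fence *fence)
{
	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, vops->q);
	struct xe_vma_op *op;
	int i;

	/* Destroy VMAs removed by the ops, tied to the bind fence */
	list_for_each_entry(op, &vops->list, link) {
		if (op->base.op == DRM_GPUVA_OP_UNMAP)
			xe_vma_destroy(gpuva_to_vma(op->base.unmap.va), fence);
		else if (op->base.op == DRM_GPUVA_OP_REMAP)
			xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va),
				       fence);
	}

	/* Signal every out-fence exactly once, in one place */
	for (i = 0; i < vops->num_syncs; i++)
		xe_sync_entry_signal(vops->syncs + i, fence);

	/* Record the last fence and drop this path's reference */
	xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
	dma_fence_put(fence);
}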

> -----Original Message-----
> From: Brost, Matthew <matthew.brost at intel.com>
> Sent: Wednesday, April 10, 2024 1:41 AM
> To: intel-xe at lists.freedesktop.org
> Cc: Brost, Matthew <matthew.brost at intel.com>; Zeng, Oak
> <oak.zeng at intel.com>
> Subject: [PATCH 09/13] drm/xe: Add vm_bind_ioctl_ops_fini helper
> 
> Simplify the VM bind code by signaling out-fences and destroying VMAs in a
> single location. This will help with the transition to a single job for many
> bind ops.
> 
> v2:
>  - s/vm_bind_ioctl_ops_install_fences/vm_bind_ioctl_ops_fini (Oak)
>  - Set last fence in vm_bind_ioctl_ops_fini (Oak)
> 
> Cc: Oak Zeng <oak.zeng at intel.com>
> Signed-off-by: Matthew Brost <matthew.brost at intel.com>
> ---
>  drivers/gpu/drm/xe/xe_vm.c | 62 +++++++++++++++-----------------------
>  1 file changed, 24 insertions(+), 38 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 09871538484b..97384c77f662 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -1748,7 +1748,7 @@ xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
>  	struct dma_fence *fence = NULL;
>  	struct dma_fence **fences = NULL;
>  	struct dma_fence_array *cf = NULL;
> -	int cur_fence = 0, i;
> +	int cur_fence = 0;
>  	int number_tiles = hweight8(vma->tile_present);
>  	int err;
>  	u8 id;
> @@ -1806,10 +1806,6 @@ xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
> 
>  	fence = cf ? &cf->base : !fence ?
>  		xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence;
> -	if (last_op) {
> -		for (i = 0; i < num_syncs; i++)
> -			xe_sync_entry_signal(&syncs[i], fence);
> -	}
> 
>  	return fence;
> 
> @@ -1833,7 +1829,7 @@ xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
>  	struct dma_fence **fences = NULL;
>  	struct dma_fence_array *cf = NULL;
>  	struct xe_vm *vm = xe_vma_vm(vma);
> -	int cur_fence = 0, i;
> +	int cur_fence = 0;
>  	int number_tiles = hweight8(tile_mask);
>  	int err;
>  	u8 id;
> @@ -1880,12 +1876,6 @@ xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
>  		}
>  	}
> 
> -	if (last_op) {
> -		for (i = 0; i < num_syncs; i++)
> -			xe_sync_entry_signal(&syncs[i],
> -					     cf ? &cf->base : fence);
> -	}
> -
>  	return cf ? &cf->base : fence;
> 
>  err_fences:
> @@ -1937,20 +1927,11 @@ xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
>  		if (IS_ERR(fence))
>  			return fence;
>  	} else {
> -		int i;
> -
>  		xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
> 
>  		fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
> -		if (last_op) {
> -			for (i = 0; i < num_syncs; i++)
> -				xe_sync_entry_signal(&syncs[i], fence);
> -		}
>  	}
> 
> -	if (last_op)
> -		xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
> -
>  	return fence;
>  }
> 
> @@ -1960,7 +1941,6 @@ xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
>  	     u32 num_syncs, bool first_op, bool last_op)
>  {
>  	struct dma_fence *fence;
> -	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
> 
>  	xe_vm_assert_held(vm);
>  	xe_bo_assert_held(xe_vma_bo(vma));
> @@ -1969,10 +1949,6 @@ xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
>  	if (IS_ERR(fence))
>  		return fence;
> 
> -	xe_vma_destroy(vma, fence);
> -	if (last_op)
> -		xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
> -
>  	return fence;
>  }
> 
> @@ -2127,17 +2103,7 @@ xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
>  		return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
>  				  vma->tile_mask, true, first_op, last_op);
>  	} else {
> -		struct dma_fence *fence =
> -			xe_exec_queue_last_fence_get(wait_exec_queue, vm);
> -		int i;
> -
> -		/* Nothing to do, signal fences now */
> -		if (last_op) {
> -			for (i = 0; i < num_syncs; i++)
> -				xe_sync_entry_signal(&syncs[i], fence);
> -		}
> -
> -		return fence;
> +		return xe_exec_queue_last_fence_get(wait_exec_queue, vm);
>  	}
>  }
> 
> @@ -2939,6 +2905,26 @@ static struct dma_fence *ops_execute(struct xe_vm *vm,
>  	return fence;
>  }
> 
> +static void vm_bind_ioctl_ops_fini(struct xe_vm *vm, struct xe_vma_ops *vops,
> +				   struct dma_fence *fence)
> +{
> +	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, vops->q);
> +	struct xe_vma_op *op;
> +	int i;
> +
> +	list_for_each_entry(op, &vops->list, link) {
> +		if (op->base.op == DRM_GPUVA_OP_UNMAP)
> +			xe_vma_destroy(gpuva_to_vma(op->base.unmap.va), fence);
> +		else if (op->base.op == DRM_GPUVA_OP_REMAP)
> +			xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va),
> +				       fence);
> +	}
> +	for (i = 0; i < vops->num_syncs; i++)
> +		xe_sync_entry_signal(vops->syncs + i, fence);
> +	xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
> +	dma_fence_put(fence);
> +}
> +
>  static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
>  				     struct xe_vma_ops *vops)
>  {
> @@ -2963,7 +2949,7 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
>  			xe_vm_kill(vm, false);
>  			goto unlock;
>  		} else {
> -			dma_fence_put(fence);
> +			vm_bind_ioctl_ops_fini(vm, vops, fence);
>  		}
>  	}
> 
> --
> 2.34.1


