[PATCH 2/3] drm/xe/vm: drop vm->destroy_work

Matthew Brost <matthew.brost at intel.com>
Fri Apr 12 22:32:38 UTC 2024


On Fri, Apr 12, 2024 at 12:31:46PM +0100, Matthew Auld wrote:
> Now that we no longer grab the usm.lock mutex (which might sleep) it
> looks like it should be safe to directly perform xe_vm_free when vm
> refcount reaches zero, instead of punting that off to some worker.
> 
> Signed-off-by: Matthew Auld <matthew.auld at intel.com>
> Cc: Matthew Brost <matthew.brost at intel.com>

This does look right in the current code base / series. However, in [1] I
do suggest deferring the 'close' part of xe_vm_close_and_put to the final
put if the device is wedged. If we do that, we might need the worker
again? I guess we can figure it out if / when we decide to take my
suggestion.
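
For context, a minimal sketch of the general pattern the worker existed
for (made-up obj/obj_* names, not the xe code): the final put can come
from a dma_fence callback in irq context, so a release path that might
sleep has to be punted to process context:

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct obj {
	struct kref ref;
	struct work_struct free_work;	/* only needed while release may sleep */
};

static void obj_free_work(struct work_struct *w)
{
	struct obj *o = container_of(w, struct obj, free_work);

	/* process context: sleeping operations are fine here */
	kfree(o);
}

static void obj_release(struct kref *ref)
{
	struct obj *o = container_of(ref, struct obj, ref);

	/* may run in irq context (e.g. dma_fence signaling), so defer */
	queue_work(system_unbound_wq, &o->free_work);
}

static struct obj *obj_create(void)
{
	struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

	if (!o)
		return NULL;

	kref_init(&o->ref);
	INIT_WORK(&o->free_work, obj_free_work);
	return o;
}

static void obj_put(struct obj *o)
{
	kref_put(&o->ref, obj_release);
}

Once nothing in the release path can sleep, obj_release() can just
kfree() inline and the worker (and its INIT_WORK) goes away, which is
the simplification this patch makes for xe_vm.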

With that, this looks like a good cleanup:
Reviewed-by: Matthew Brost <matthew.brost at intel.com>

[1] https://patchwork.freedesktop.org/patch/588557/?series=132232&rev=1
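
For anyone skimming: with this patch the final drm_gpuvm_put() invokes
the driver's .vm_free hook directly, i.e. xe_vm_free() with no worker
bounce in between. Roughly (sketch only, other hooks elided):

static const struct drm_gpuvm_ops gpuvm_ops = {
	/* bind/validate hooks elided */
	.vm_free = xe_vm_free,	/* final drm_gpuvm_put() lands here */
};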

> ---
>  drivers/gpu/drm/xe/xe_vm.c       | 17 ++---------------
>  drivers/gpu/drm/xe/xe_vm_types.h |  7 -------
>  2 files changed, 2 insertions(+), 22 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index c5c26b3d1b76..300d166f412e 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -1279,8 +1279,6 @@ static const struct xe_pt_ops xelp_pt_ops = {
>  	.pde_encode_bo = xelp_pde_encode_bo,
>  };
>  
> -static void vm_destroy_work_func(struct work_struct *w);
> -
>  /**
>   * xe_vm_create_scratch() - Setup a scratch memory pagetable tree for the
>   * given tile and vm.
> @@ -1360,8 +1358,6 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
>  	init_rwsem(&vm->userptr.notifier_lock);
>  	spin_lock_init(&vm->userptr.invalidated_lock);
>  
> -	INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
> -
>  	INIT_LIST_HEAD(&vm->preempt.exec_queues);
>  	vm->preempt.min_run_period_ms = 10;	/* FIXME: Wire up to uAPI */
>  
> @@ -1599,10 +1595,9 @@ void xe_vm_close_and_put(struct xe_vm *vm)
>  	xe_vm_put(vm);
>  }
>  
> -static void vm_destroy_work_func(struct work_struct *w)
> +static void xe_vm_free(struct drm_gpuvm *gpuvm)
>  {
> -	struct xe_vm *vm =
> -		container_of(w, struct xe_vm, destroy_work);
> +	struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
>  	struct xe_device *xe = vm->xe;
>  	struct xe_tile *tile;
>  	u8 id;
> @@ -1622,14 +1617,6 @@ static void vm_destroy_work_func(struct work_struct *w)
>  	kfree(vm);
>  }
>  
> -static void xe_vm_free(struct drm_gpuvm *gpuvm)
> -{
> -	struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
> -
> -	/* To destroy the VM we need to be able to sleep */
> -	queue_work(system_unbound_wq, &vm->destroy_work);
> -}
> -
>  struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
>  {
>  	struct xe_vm *vm;
> diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
> index badf3945083d..7570c2c6c463 100644
> --- a/drivers/gpu/drm/xe/xe_vm_types.h
> +++ b/drivers/gpu/drm/xe/xe_vm_types.h
> @@ -177,13 +177,6 @@ struct xe_vm {
>  	 */
>  	struct list_head rebind_list;
>  
> -	/**
> -	 * @destroy_work: worker to destroy VM, needed as a dma_fence signaling
> -	 * from an irq context can be last put and the destroy needs to be able
> -	 * to sleep.
> -	 */
> -	struct work_struct destroy_work;
> -
>  	/**
>  	 * @rftree: range fence tree to track updates to page table structure.
>  	 * Used to implement conflict tracking between independent bind engines.
> -- 
> 2.44.0
> 

