[PATCH 1/2] Revert "drm/xe/vm: drop vm->destroy_work"
Matthew Brost
matthew.brost at intel.com
Wed Apr 24 03:44:29 UTC 2024
On Tue, Apr 23, 2024 at 08:47:22AM +0100, Matthew Auld wrote:
> This reverts commit 5b259c0d1d3caa6efc66c2b856840e68993f814e.
>
> Cleanup here is good, however we need to be able to flush a worker during
> vm destruction which might involve sleeping, so bring back the worker.
>
> Signed-off-by: Matthew Auld <matthew.auld at intel.com>
> Cc: Matthew Brost <matthew.brost at intel.com>
I guess the alternative is a lock plus an enable variable around
queuing of the rebind worker (roughly as sketched below)? I'd prefer
leaving the destroy worker intact over adding a new lock. Also there is
a good chance that at some point in the future we will need to sleep
again on VM destroy and will need the destroy worker anyway.
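
For reference, something like this minimal sketch is what I mean by
the alternative (rebind_lock and rebind_enabled are invented names
here, not existing fields, and the queuing site is simplified):

    /* Queuing side: only queue the rebind worker while enabled. */
    spin_lock(&vm->rebind_lock);
    if (vm->rebind_enabled)
        queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
    spin_unlock(&vm->rebind_lock);

    /* Close path (process context): disable further queuing, then
     * flush, so the final put never has to sleep. */
    spin_lock(&vm->rebind_lock);
    vm->rebind_enabled = false;
    spin_unlock(&vm->rebind_lock);
    cancel_work_sync(&vm->preempt.rebind_work);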
With that:
Reviewed-by: Matthew Brost <matthew.brost at intel.com>
> ---
> drivers/gpu/drm/xe/xe_vm.c | 17 +++++++++++++++--
> drivers/gpu/drm/xe/xe_vm_types.h | 7 +++++++
> 2 files changed, 22 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> index 85d6f359142d..2ba7c920a8af 100644
> --- a/drivers/gpu/drm/xe/xe_vm.c
> +++ b/drivers/gpu/drm/xe/xe_vm.c
> @@ -1178,6 +1178,8 @@ static const struct xe_pt_ops xelp_pt_ops = {
> .pde_encode_bo = xelp_pde_encode_bo,
> };
>
> +static void vm_destroy_work_func(struct work_struct *w);
> +
> /**
> * xe_vm_create_scratch() - Setup a scratch memory pagetable tree for the
> * given tile and vm.
> @@ -1257,6 +1259,8 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
> init_rwsem(&vm->userptr.notifier_lock);
> spin_lock_init(&vm->userptr.invalidated_lock);
>
> + INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
> +
> INIT_LIST_HEAD(&vm->preempt.exec_queues);
> vm->preempt.min_run_period_ms = 10; /* FIXME: Wire up to uAPI */
>
> @@ -1494,9 +1498,10 @@ void xe_vm_close_and_put(struct xe_vm *vm)
> xe_vm_put(vm);
> }
>
> -static void xe_vm_free(struct drm_gpuvm *gpuvm)
> +static void vm_destroy_work_func(struct work_struct *w)
> {
> - struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
> + struct xe_vm *vm =
> + container_of(w, struct xe_vm, destroy_work);
> struct xe_device *xe = vm->xe;
> struct xe_tile *tile;
> u8 id;
> @@ -1516,6 +1521,14 @@ static void xe_vm_free(struct drm_gpuvm *gpuvm)
> kfree(vm);
> }
>
> +static void xe_vm_free(struct drm_gpuvm *gpuvm)
> +{
> + struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
> +
> + /* To destroy the VM we need to be able to sleep */
> + queue_work(system_unbound_wq, &vm->destroy_work);
> +}
> +
> struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
> {
> struct xe_vm *vm;
> diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
> index 7570c2c6c463..badf3945083d 100644
> --- a/drivers/gpu/drm/xe/xe_vm_types.h
> +++ b/drivers/gpu/drm/xe/xe_vm_types.h
> @@ -177,6 +177,13 @@ struct xe_vm {
> */
> struct list_head rebind_list;
>
> + /**
> +	 * @destroy_work: worker to destroy VM, needed as the last put can
> +	 * come from dma_fence signaling in an irq context and the destroy
> +	 * needs to be able to sleep.
> + */
> + struct work_struct destroy_work;
> +
> /**
> * @rftree: range fence tree to track updates to page table structure.
> * Used to implement conflict tracking between independent bind engines.
> --
> 2.44.0
>
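
For anyone skimming the archive, the constraint the revert restores
handling for is the usual dma-fence one: the final reference put can
happen from fence signaling in irq context, where sleeping is not
allowed, so any teardown that can block has to be punted to a worker.
A generic sketch of the pattern (simplified, with invented obj /
obj_release names, not the actual xe code):

    #include <linux/kref.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct obj {
        struct kref refcount;
        struct mutex lock;
        struct work_struct destroy_work;
    };

    static void obj_destroy_work_func(struct work_struct *w)
    {
        struct obj *o = container_of(w, struct obj, destroy_work);

        /* Process context: sleeping teardown is fine here. */
        mutex_lock(&o->lock);
        /* ... cleanup that may block ... */
        mutex_unlock(&o->lock);
        kfree(o);
    }

    static void obj_release(struct kref *kref)
    {
        struct obj *o = container_of(kref, struct obj, refcount);

        /* May run from irq context via a dma_fence callback,
         * so just defer the real work to process context. */
        queue_work(system_unbound_wq, &o->destroy_work);
    }

The caller side pairs this with kref_put(&o->refcount, obj_release)
and an INIT_WORK(&o->destroy_work, obj_destroy_work_func) at creation
time, mirroring what the patch adds back in xe_vm_create().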