[PATCH v2] drm/xe/uapi: Remove support for persistent exec_queues

Thomas Hellström thomas.hellstrom at linux.intel.com
Thu Feb 8 15:37:49 UTC 2024


On Thu, 2024-02-08 at 09:34 -0600, Lucas De Marchi wrote:
> On Thu, Feb 08, 2024 at 09:38:45AM +0100, Thomas Hellström wrote:
> > Persistent exec_queues delay explicit destruction of exec_queues
> > until they are done executing, but destruction on process exit
> > is still immediate. It turns out no UMD is relying on this
> > functionality, so remove it. If a use-case turns out to exist
> > in the future, it can be re-added.
> > 
> > Persistent exec_queues were never used for LR VMs.
> > 
> > v2:
> > - Don't add an "UNUSED" define for the missing property (Lucas, Rodrigo)
> > 
> > Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
> > Cc: Rodrigo Vivi <rodrigo.vivi at intel.com>
> > Cc: Matthew Brost <matthew.brost at intel.com>
> > Cc: David Airlie <airlied at gmail.com>
> > Cc: Daniel Vetter <daniel at ffwll.ch>
> > Cc: Lucas De Marchi <lucas.demarchi at intel.com>
> > Cc: Francois Dugast <francois.dugast at intel.com>
> > Signed-off-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
> 
> I think you missed a review comment from Niranjana. Should add this
> diff on top:

Ah, yes you're right. I'll respin. Need an ack from UMD as well.
/Thomas


> 
> 
> > 	diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
> > 	index 648391961fc4..a695f97ac0f3 100644
> > 	--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
> > 	+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
> > 	@@ -105,16 +105,6 @@ struct xe_exec_queue {
> > 			struct xe_guc_exec_queue *guc;
> > 		};
> > 	 
> > 	-	/**
> > 	-	 * @persistent: persistent exec queue state
> > 	-	 */
> > 	-	struct {
> > 	-		/** @persistent.xef: file which this exec queue belongs to */
> > 	-		struct xe_file *xef;
> > 	-		/** @persisiten.link: link in list of persistent exec queues */
> > 	-		struct list_head link;
> > 	-	} persistent;
> > 	-
> > 		union {
> > 			/**
> > 			 * @parallel: parallel submission state
> 
> with that, Reviewed-by: Lucas De Marchi <lucas.demarchi at intel.com>
> 
> Lucas De Marchi
> 
> > ---
> > drivers/gpu/drm/xe/xe_device.c       | 39 ----------------------------
> > drivers/gpu/drm/xe/xe_device.h       |  4 ---
> > drivers/gpu/drm/xe/xe_device_types.h |  8 ------
> > drivers/gpu/drm/xe/xe_exec_queue.c   | 36 +++++--------------------
> > drivers/gpu/drm/xe/xe_execlist.c     |  2 --
> > drivers/gpu/drm/xe/xe_guc_submit.c   |  2 --
> > include/uapi/drm/xe_drm.h            |  1 -
> > 7 files changed, 6 insertions(+), 86 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
> > index 5b84d7305520..1cf7561c8b4d 100644
> > --- a/drivers/gpu/drm/xe/xe_device.c
> > +++ b/drivers/gpu/drm/xe/xe_device.c
> > @@ -86,9 +86,6 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file)
> > 	return 0;
> > }
> > 
> > -static void device_kill_persistent_exec_queues(struct xe_device *xe,
> > -					       struct xe_file *xef);
> > -
> > static void xe_file_close(struct drm_device *dev, struct drm_file *file)
> > {
> > 	struct xe_device *xe = to_xe_device(dev);
> > @@ -105,8 +102,6 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file)
> > 	mutex_unlock(&xef->exec_queue.lock);
> > 	xa_destroy(&xef->exec_queue.xa);
> > 	mutex_destroy(&xef->exec_queue.lock);
> > -	device_kill_persistent_exec_queues(xe, xef);
> > -
> > 	mutex_lock(&xef->vm.lock);
> > 	xa_for_each(&xef->vm.xa, idx, vm)
> > 		xe_vm_close_and_put(vm);
> > @@ -258,9 +253,6 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
> > 			xa_erase(&xe->usm.asid_to_vm, asid);
> > 	}
> > 
> > -	drmm_mutex_init(&xe->drm, &xe->persistent_engines.lock);
> > -	INIT_LIST_HEAD(&xe->persistent_engines.list);
> > -
> > 	spin_lock_init(&xe->pinned.lock);
> > 	INIT_LIST_HEAD(&xe->pinned.kernel_bo_present);
> > 	INIT_LIST_HEAD(&xe->pinned.external_vram);
> > @@ -599,37 +591,6 @@ void xe_device_shutdown(struct xe_device *xe)
> > {
> > }
> > 
> > -void xe_device_add_persistent_exec_queues(struct xe_device *xe, struct xe_exec_queue *q)
> > -{
> > -	mutex_lock(&xe->persistent_engines.lock);
> > -	list_add_tail(&q->persistent.link, &xe->persistent_engines.list);
> > -	mutex_unlock(&xe->persistent_engines.lock);
> > -}
> > -
> > -void xe_device_remove_persistent_exec_queues(struct xe_device *xe,
> > -					     struct xe_exec_queue *q)
> > -{
> > -	mutex_lock(&xe->persistent_engines.lock);
> > -	if (!list_empty(&q->persistent.link))
> > -		list_del(&q->persistent.link);
> > -	mutex_unlock(&xe->persistent_engines.lock);
> > -}
> > -
> > -static void device_kill_persistent_exec_queues(struct xe_device *xe,
> > -					       struct xe_file *xef)
> > -{
> > -	struct xe_exec_queue *q, *next;
> > -
> > -	mutex_lock(&xe->persistent_engines.lock);
> > -	list_for_each_entry_safe(q, next, &xe->persistent_engines.list,
> > -				 persistent.link)
> > -		if (q->persistent.xef == xef) {
> > -			xe_exec_queue_kill(q);
> > -			list_del_init(&q->persistent.link);
> > -		}
> > -	mutex_unlock(&xe->persistent_engines.lock);
> > -}
> > -
> > void xe_device_wmb(struct xe_device *xe)
> > {
> > 	struct xe_gt *gt = xe_root_mmio_gt(xe);
> > diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
> > index 462f59e902b1..14be34d9f543 100644
> > --- a/drivers/gpu/drm/xe/xe_device.h
> > +++ b/drivers/gpu/drm/xe/xe_device.h
> > @@ -42,10 +42,6 @@ int xe_device_probe(struct xe_device *xe);
> > void xe_device_remove(struct xe_device *xe);
> > void xe_device_shutdown(struct xe_device *xe);
> > 
> > -void xe_device_add_persistent_exec_queues(struct xe_device *xe, struct xe_exec_queue *q);
> > -void xe_device_remove_persistent_exec_queues(struct xe_device *xe,
> > -					     struct xe_exec_queue *q);
> > -
> > void xe_device_wmb(struct xe_device *xe);
> > 
> > static inline struct xe_file *to_xe_file(const struct drm_file *file)
> > diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
> > index eb2b806a1d23..9785eef2e5a4 100644
> > --- a/drivers/gpu/drm/xe/xe_device_types.h
> > +++ b/drivers/gpu/drm/xe/xe_device_types.h
> > @@ -348,14 +348,6 @@ struct xe_device {
> > 		struct mutex lock;
> > 	} usm;
> > 
> > -	/** @persistent_engines: engines that are closed but still running */
> > -	struct {
> > -		/** @persistent_engines.lock: protects persistent engines */
> > -		struct mutex lock;
> > -		/** @persistent_engines.list: list of persistent engines */
> > -		struct list_head list;
> > -	} persistent_engines;
> > -
> > 	/** @pinned: pinned BO state */
> > 	struct {
> > 		/** @pinned.lock: protected pinned BO list state */
> > diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
> > index 2976635be4d3..c2bcb5735e7b 100644
> > --- a/drivers/gpu/drm/xe/xe_exec_queue.c
> > +++ b/drivers/gpu/drm/xe/xe_exec_queue.c
> > @@ -60,7 +60,6 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
> > 	q->fence_irq = &gt->fence_irq[hwe->class];
> > 	q->ring_ops = gt->ring_ops[hwe->class];
> > 	q->ops = gt->exec_queue_ops;
> > -	INIT_LIST_HEAD(&q->persistent.link);
> > 	INIT_LIST_HEAD(&q->compute.link);
> > 	INIT_LIST_HEAD(&q->multi_gt_link);
> > 
> > @@ -379,23 +378,6 @@ static int exec_queue_set_preemption_timeout(struct xe_device *xe,
> > 	return 0;
> > }
> > 
> > -static int exec_queue_set_persistence(struct xe_device *xe, struct xe_exec_queue *q,
> > -				      u64 value, bool create)
> > -{
> > -	if (XE_IOCTL_DBG(xe, !create))
> > -		return -EINVAL;
> > -
> > -	if (XE_IOCTL_DBG(xe, xe_vm_in_preempt_fence_mode(q->vm)))
> > -		return -EINVAL;
> > -
> > -	if (value)
> > -		q->flags |= EXEC_QUEUE_FLAG_PERSISTENT;
> > -	else
> > -		q->flags &= ~EXEC_QUEUE_FLAG_PERSISTENT;
> > -
> > -	return 0;
> > -}
> > -
> > static int exec_queue_set_job_timeout(struct xe_device *xe, struct xe_exec_queue *q,
> > 				      u64 value, bool create)
> > {
> > @@ -469,7 +451,6 @@ static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
> > 	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
> > 	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
> > 	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT] = exec_queue_set_preemption_timeout,
> > -	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE] = exec_queue_set_persistence,
> > 	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT] = exec_queue_set_job_timeout,
> > 	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER] = exec_queue_set_acc_trigger,
> > 	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY] = exec_queue_set_acc_notify,
> > @@ -496,6 +477,9 @@ static int exec_queue_user_ext_set_property(struct xe_device *xe,
> > 		return -EINVAL;
> > 
> > 	idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
> > +	if (!exec_queue_set_property_funcs[idx])
> > +		return -EINVAL;
> > +
> > 	return exec_queue_set_property_funcs[idx](xe, q, ext.value, create);
> > }
> > 
> > @@ -707,8 +691,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
> > 			/* The migration vm doesn't hold rpm ref */
> > 			xe_device_mem_access_get(xe);
> > 
> > -			flags = EXEC_QUEUE_FLAG_PERSISTENT | EXEC_QUEUE_FLAG_VM |
> > -				(id ? EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD : 0);
> > +			flags = EXEC_QUEUE_FLAG_VM | (id ? EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD : 0);
> > 
> > 			migrate_vm = xe_migrate_get_vm(gt_to_tile(gt)->migrate);
> > 			new = xe_exec_queue_create(xe, migrate_vm, logical_mask,
> > @@ -759,9 +742,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
> > 		}
> > 
> > 		q = xe_exec_queue_create(xe, vm, logical_mask,
> > -					 args->width, hwe,
> > -					 xe_vm_in_lr_mode(vm) ? 0 :
> > -					 EXEC_QUEUE_FLAG_PERSISTENT,
> > +					 args->width, hwe, 0,
> > 					 args->extensions);
> > 		up_read(&vm->lock);
> > 		xe_vm_put(vm);
> > @@ -778,8 +759,6 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
> > 		}
> > 	}
> > 
> > -	q->persistent.xef = xef;
> > -
> > 	mutex_lock(&xef->exec_queue.lock);
> > 	err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
> > 	mutex_unlock(&xef->exec_queue.lock);
> > @@ -922,10 +901,7 @@ int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
> > 	if (XE_IOCTL_DBG(xe, !q))
> > 		return -ENOENT;
> > 
> > -	if (!(q->flags & EXEC_QUEUE_FLAG_PERSISTENT))
> > -		xe_exec_queue_kill(q);
> > -	else
> > -		xe_device_add_persistent_exec_queues(xe, q);
> > +	xe_exec_queue_kill(q);
> > 
> > 	trace_xe_exec_queue_close(q);
> > 	xe_exec_queue_put(q);
> > diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
> > index 58dfe6a78ffe..1788e78caf5c 100644
> > --- a/drivers/gpu/drm/xe/xe_execlist.c
> > +++ b/drivers/gpu/drm/xe/xe_execlist.c
> > @@ -378,8 +378,6 @@ static void execlist_exec_queue_fini_async(struct work_struct *w)
> > 		list_del(&exl->active_link);
> > 	spin_unlock_irqrestore(&exl->port->lock, flags);
> > 
> > -	if (q->flags & EXEC_QUEUE_FLAG_PERSISTENT)
> > -		xe_device_remove_persistent_exec_queues(xe, q);
> > 	drm_sched_entity_fini(&exl->entity);
> > 	drm_sched_fini(&exl->sched);
> > 	kfree(exl);
> > diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
> > index 4744668ef60a..efee08680aea 100644
> > --- a/drivers/gpu/drm/xe/xe_guc_submit.c
> > +++ b/drivers/gpu/drm/xe/xe_guc_submit.c
> > @@ -1031,8 +1031,6 @@ static void __guc_exec_queue_fini_async(struct work_struct *w)
> > 
> > 	if (xe_exec_queue_is_lr(q))
> > 		cancel_work_sync(&ge->lr_tdr);
> > -	if (q->flags & EXEC_QUEUE_FLAG_PERSISTENT)
> > -		xe_device_remove_persistent_exec_queues(gt_to_xe(q->gt), q);
> > 	release_guc_id(guc, q);
> > 	xe_sched_entity_fini(&ge->entity);
> > 	xe_sched_fini(&ge->sched);
> > diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
> > index 50bbea0992d9..ee4673d5d1c1 100644
> > --- a/include/uapi/drm/xe_drm.h
> > +++ b/include/uapi/drm/xe_drm.h
> > @@ -1045,7 +1045,6 @@ struct drm_xe_exec_queue_create {
> > #define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY		0
> > #define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE		1
> > #define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT	2
> > -#define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE		3
> > #define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT		4
> > #define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER		5
> > #define   DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY		6
> > -- 
> > 2.43.0
> > 


