[PATCH 2/6] drm/xe: Add generic dependency jobs / scheduler

Matthew Brost matthew.brost at intel.com
Tue Jun 24 05:34:48 UTC 2025


On Wed, Jun 18, 2025 at 07:10:21AM -0700, Matthew Brost wrote:
> On Wed, Jun 18, 2025 at 01:20:42PM +0200, Thomas Hellström wrote:
> > On Tue, 2025-06-17 at 08:37 -0700, Matthew Brost wrote:
> > > Add generic dependency jobs / scheduler which serves as a wrapper for
> > > the DRM scheduler. Useful when we want to delay a generic operation
> > > until a dma-fence signals.
> > > 
> > > Existing use cases could be destroying resources based on fences /
> > > dma-resv, the preempt rebind worker, and pipelined GT TLB
> > > invalidations.
> > > 
> > > Written in such a way that it could be moved to the DRM subsystem if needed.
> > > 
> > > Signed-off-by: Matthew Brost <matthew.brost at intel.com>
> > > ---
> > >  drivers/gpu/drm/xe/Makefile           |   1 +
> > >  drivers/gpu/drm/xe/xe_dep_job_types.h |  29 ++++++
> > >  drivers/gpu/drm/xe/xe_dep_scheduler.c | 145 ++++++++++++++++++++++++++
> > >  drivers/gpu/drm/xe/xe_dep_scheduler.h |  21 ++++
> > >  4 files changed, 196 insertions(+)
> > >  create mode 100644 drivers/gpu/drm/xe/xe_dep_job_types.h
> > >  create mode 100644 drivers/gpu/drm/xe/xe_dep_scheduler.c
> > >  create mode 100644 drivers/gpu/drm/xe/xe_dep_scheduler.h
> > > 
> > > diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
> > > index f5f5775acdc0..3523bd093b7a 100644
> > > --- a/drivers/gpu/drm/xe/Makefile
> > > +++ b/drivers/gpu/drm/xe/Makefile
> > > @@ -28,6 +28,7 @@ $(obj)/generated/%_wa_oob.c $(obj)/generated/%_wa_oob.h: $(obj)/xe_gen_wa_oob \
> > >  xe-y += xe_bb.o \
> > >  	xe_bo.o \
> > >  	xe_bo_evict.o \
> > > +	xe_dep_scheduler.o \
> > >  	xe_devcoredump.o \
> > >  	xe_device.o \
> > >  	xe_device_sysfs.o \
> > > diff --git a/drivers/gpu/drm/xe/xe_dep_job_types.h b/drivers/gpu/drm/xe/xe_dep_job_types.h
> > > new file mode 100644
> > > index 000000000000..c6a484f24c8c
> > > --- /dev/null
> > > +++ b/drivers/gpu/drm/xe/xe_dep_job_types.h
> > > @@ -0,0 +1,29 @@
> > > +/* SPDX-License-Identifier: MIT */
> > > +/*
> > > + * Copyright © 2025 Intel Corporation
> > > + */
> > > +
> > > +#ifndef _XE_DEP_JOB_TYPES_H_
> > > +#define _XE_DEP_JOB_TYPES_H_
> > > +
> > > +#include <drm/gpu_scheduler.h>
> > > +
> > > +struct xe_dep_job;
> > > +
> > > +/** struct xe_dep_job_ops - Generic Xe dependency job operations */
> > > +struct xe_dep_job_ops {
> > > +	/** @run_job: Run generic Xe dependency job */
> > > +	struct dma_fence *(*run_job)(struct xe_dep_job *job);
> > > +	/** @free_job: Free generic Xe dependency job */
> > > +	void (*free_job)(struct xe_dep_job *job);
> > > +};
> > > +
> > > +/** struct xe_dep_job - Generic dependency Xe job */
> > > +struct xe_dep_job {
> > > +	/** @drm: base DRM scheduler job */
> > > +	struct drm_sched_job drm;
> > > +	/** @ops: dependency job operations */
> > > +	const struct xe_dep_job_ops *ops;
> > > +};
> > > +
> > > +#endif
> > > diff --git a/drivers/gpu/drm/xe/xe_dep_scheduler.c b/drivers/gpu/drm/xe/xe_dep_scheduler.c
> > > new file mode 100644
> > > index 000000000000..fbd55577d787
> > > --- /dev/null
> > > +++ b/drivers/gpu/drm/xe/xe_dep_scheduler.c
> > > @@ -0,0 +1,145 @@
> > > +// SPDX-License-Identifier: MIT
> > > +/*
> > > + * Copyright © 2025 Intel Corporation
> > > + */
> > > +
> > > +#include <linux/slab.h>
> > > +
> > > +#include <drm/gpu_scheduler.h>
> > > +
> > > +#include "xe_dep_job_types.h"
> > > +#include "xe_dep_scheduler.h"
> > > +#include "xe_device_types.h"
> > > +
> > > +/**
> > > + * DOC: Xe Dependency Scheduler
> > > + *
> > > + * The Xe dependency scheduler is a simple wrapper built around the DRM
> > > + * scheduler to execute jobs once their dependencies are resolved (i.e., all
> > > + * input fences specified as dependencies are signaled). The jobs that are
> > > + * executed contain virtual functions to run (execute) and free the job,
> > > + * allowing a single dependency scheduler to handle jobs performing different
> > > + * operations.
> > > + *
> > > + * Example use cases include deferred resource freeing, TLB invalidations after
> > > + * bind jobs, etc.
> > > + */
> > > +
> > > +/** struct xe_dep_scheduler - Generic Xe dependency scheduler */
> > > +struct xe_dep_scheduler {
> > > +	/** @sched: DRM GPU scheduler */
> > > +	struct drm_gpu_scheduler sched;
> > 
> > Would it make sense to support sharing a single drm_gpu_scheduler for
> > many xe_dep_schedulers? Thinking of the first use-case where we'd
> 
> So do you mean sharing a single drm_gpu_scheduler for many
> drm_sched_entity? If that's what you mean, I think that would work. Each
> entity gets its own dma-fence context, and that is what we want - so
> we'd assign an entity per (GT, queue) tuple.
> 
> > probably want just a single drm_gpu_scheduler per GuC since
> > invalidations are ordered anyway?
> 
> A scheduler must complete jobs in the order they were issued, which
> also fits here with a single scheduler per GuC.
> 
> That said, I'm on the fence about the ROI of this type of refactor, as
> it would make the dep scheduler API a bit more complex given that
> entities would now need to be dynamically allocated, added, and
> removed. I can play around with this restructure and see what it looks
> like in code, I suppose.
> 
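For reference, something like the below is roughly the restructure I
have in mind - a completely untested sketch with made-up names, just to
show one shared drm_gpu_scheduler with a dynamically allocated entity
per (GT, queue) tuple:

/*
 * Illustrative only - one drm_gpu_scheduler (e.g., per GuC) shared by
 * many entities. Each entity gets its own dma-fence context while the
 * shared scheduler keeps job completion in issue order.
 */
struct xe_dep_entity {
	/** @entity: entity pointing at the shared scheduler */
	struct drm_sched_entity entity;
};

static struct xe_dep_entity *
xe_dep_entity_create(struct drm_gpu_scheduler *shared_sched)
{
	struct xe_dep_entity *de;
	int err;

	de = kzalloc(sizeof(*de), GFP_KERNEL);
	if (!de)
		return ERR_PTR(-ENOMEM);

	/* Every (GT, queue) tuple points at the same scheduler */
	err = drm_sched_entity_init(&de->entity, 0, &shared_sched, 1, NULL);
	if (err) {
		kfree(de);
		return ERR_PTR(err);
	}

	return de;
}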

I thought about this a bit more; what might actually make more sense
here is sharing a single WQ for all TLB invalidations on a GT. We fully
contend on the TLB invalidation anyway, so there is little sense in
scheduling these on different WQs. All the interfaces already exist for
this - we just need to pass in a WQ argument when we create the
xe_dep_schedulers in the exec queue.
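
Roughly like this - again an untested sketch, and gt->tlb_inval_job_wq
is a made-up field standing in for wherever the shared per-GT WQ would
actually live:

/*
 * Illustrative only - every exec queue's TLB invalidation dep
 * scheduler is created on the same per-GT WQ (e.g., an ordered one),
 * so invalidation jobs on the GT are serialized rather than spread
 * across per-queue WQs.
 */
static struct xe_dep_scheduler *
exec_queue_tlb_inval_dep_scheduler(struct xe_device *xe, struct xe_gt *gt)
{
	return xe_dep_scheduler_create(xe, gt->tlb_inval_job_wq,
				       "tlb-inval-dep", 64 /* job limit */);
}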

Matt

> Matt
> 
> > 
> > /Thomas
> > 
> > 
> > > +	/** @entity: DRM scheduler entity  */
> > > +	struct drm_sched_entity entity;
> > > +	/** @rcu: For safe freeing of exported dma fences */
> > > +	struct rcu_head rcu;
> > > +};
> > > +
> > > +static struct dma_fence *xe_dep_scheduler_run_job(struct drm_sched_job *drm_job)
> > > +{
> > > +	struct xe_dep_job *dep_job =
> > > +		container_of(drm_job, typeof(*dep_job), drm);
> > > +
> > > +	return dep_job->ops->run_job(dep_job);
> > > +}
> > > +
> > > +static void xe_dep_scheduler_free_job(struct drm_sched_job *drm_job)
> > > +{
> > > +	struct xe_dep_job *dep_job =
> > > +		container_of(drm_job, typeof(*dep_job), drm);
> > > +
> > > +	dep_job->ops->free_job(dep_job);
> > > +}
> > > +
> > > +static const struct drm_sched_backend_ops sched_ops = {
> > > +	.run_job = xe_dep_scheduler_run_job,
> > > +	.free_job = xe_dep_scheduler_free_job,
> > > +};
> > > +
> > > +/**
> > > + * xe_dep_scheduler_create() - Generic Xe dependency scheduler create
> > > + * @xe: Xe device
> > > + * @submit_wq: Submit workqueue struct (can be NULL)
> > > + * @name: Name of dependency scheduler
> > > + * @job_limit: Max dependency jobs that can be scheduled
> > > + *
> > > + * Create a generic Xe dependency scheduler and initialize internal DRM
> > > + * scheduler objects.
> > > + *
> > > + * Return: Generic Xe dependency scheduler object or ERR_PTR
> > > + */
> > > +struct xe_dep_scheduler *
> > > +xe_dep_scheduler_create(struct xe_device *xe,
> > > +			struct workqueue_struct *submit_wq,
> > > +			const char *name, u32 job_limit)
> > > +{
> > > +	struct xe_dep_scheduler *dep_scheduler;
> > > +	struct drm_gpu_scheduler *sched;
> > > +	const struct drm_sched_init_args args = {
> > > +		.ops = &sched_ops,
> > > +		.submit_wq = submit_wq,
> > > +		.num_rqs = 1,
> > > +		.credit_limit = job_limit,
> > > +		.timeout = MAX_SCHEDULE_TIMEOUT,
> > > +		.name = name,
> > > +		.dev = xe->drm.dev,
> > > +	};
> > > +	int err;
> > > +
> > > +	dep_scheduler = kzalloc(sizeof(*dep_scheduler), GFP_KERNEL);
> > > +	if (!dep_scheduler)
> > > +		return ERR_PTR(-ENOMEM);
> > > +
> > > +	err = drm_sched_init(&dep_scheduler->sched, &args);
> > > +	if (err)
> > > +		goto err_free;
> > > +
> > > +	sched = &dep_scheduler->sched;
> > > +	err = drm_sched_entity_init(&dep_scheduler->entity, 0,
> > > +				    (struct drm_gpu_scheduler **)&sched, 1,
> > > +				    NULL);
> > > +	if (err)
> > > +		goto err_sched;
> > > +
> > > +	init_rcu_head(&dep_scheduler->rcu);
> > > +
> > > +	return dep_scheduler;
> > > +
> > > +err_sched:
> > > +	drm_sched_fini(&dep_scheduler->sched);
> > > +err_free:
> > > +	kfree(dep_scheduler);
> > > +
> > > +	return ERR_PTR(err);
> > > +}
> > > +
> > > +/**
> > > + * xe_dep_scheduler_fini() - Generic Xe dependency scheduler finalize
> > > + * @dep_scheduler: Generic Xe dependency scheduler object
> > > + *
> > > + * Finalize internal DRM scheduler objects and free generic Xe dependency
> > > + * scheduler object
> > > + */
> > > +void xe_dep_scheduler_fini(struct xe_dep_scheduler *dep_scheduler)
> > > +{
> > > +	drm_sched_entity_fini(&dep_scheduler->entity);
> > > +	drm_sched_fini(&dep_scheduler->sched);
> > > +	/*
> > > +	 * RCU free due to sched being exported via DRM scheduler fences
> > > +	 * (timeline name).
> > > +	 */
> > > +	kfree_rcu(dep_scheduler, rcu);
> > > +}
> > > +
> > > +/**
> > > + * xe_dep_scheduler_entity() - Retrieve a generic Xe dependency scheduler
> > > + *                             DRM scheduler entity
> > > + * @dep_scheduler: Generic Xe dependency scheduler object
> > > + *
> > > + * Return: The generic Xe dependency scheduler's DRM scheduler entity
> > > + */
> > > +struct drm_sched_entity *
> > > +xe_dep_scheduler_entity(struct xe_dep_scheduler *dep_scheduler)
> > > +{
> > > +	return &dep_scheduler->entity;
> > > +}
> > > diff --git a/drivers/gpu/drm/xe/xe_dep_scheduler.h b/drivers/gpu/drm/xe/xe_dep_scheduler.h
> > > new file mode 100644
> > > index 000000000000..853961eec64b
> > > --- /dev/null
> > > +++ b/drivers/gpu/drm/xe/xe_dep_scheduler.h
> > > @@ -0,0 +1,21 @@
> > > +/* SPDX-License-Identifier: MIT */
> > > +/*
> > > + * Copyright © 2025 Intel Corporation
> > > + */
> > > +
> > > +#include <linux/types.h>
> > > +
> > > +struct drm_sched_entity;
> > > +struct workqueue_struct;
> > > +struct xe_dep_scheduler;
> > > +struct xe_device;
> > > +
> > > +struct xe_dep_scheduler *
> > > +xe_dep_scheduler_create(struct xe_device *xe,
> > > +			struct workqueue_struct *submit_wq,
> > > +			const char *name, u32 job_limit);
> > > +
> > > +void xe_dep_scheduler_fini(struct xe_dep_scheduler *dep_scheduler);
> > > +
> > > +struct drm_sched_entity *
> > > +xe_dep_scheduler_entity(struct xe_dep_scheduler *dep_scheduler);
> > 

