[Intel-xe] [PATCH v5 2/7] drm/sched: Convert drm scheduler to use a work queue rather than kthread
Luben Tuikov
luben.tuikov at amd.com
Sat Oct 14 01:30:27 UTC 2023
On 2023-10-11 19:58, Matthew Brost wrote:
> In Xe, the new Intel GPU driver, a choice was made to have a 1 to 1
> mapping between a drm_gpu_scheduler and drm_sched_entity. At first this
> seems a bit odd, but let us explain the reasoning below.
>
> 1. In Xe the submission order from multiple drm_sched_entity is not
> guaranteed to match the completion order, even when targeting the same
> hardware engine. This is because in Xe we have a firmware scheduler, the
> GuC, which is allowed to reorder, timeslice, and preempt submissions. If
> a drm_gpu_scheduler is shared across multiple drm_sched_entity, the TDR
> falls apart, as the TDR expects submission order == completion order.
> Using a dedicated drm_gpu_scheduler per drm_sched_entity solves this
> problem.
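
For readers following along: the 1:1 arrangement boils down to giving each
entity its own scheduler and a sched_list of exactly one. A minimal sketch
with hypothetical names (not Xe's actual code):

    /* One scheduler per exec queue, one entity feeding it. */
    struct drm_gpu_scheduler *sched = &exec_queue->sched;

    drm_sched_init(sched, &my_sched_ops, NULL, hw_submission, 0,
                   timeout, NULL, NULL, "exec-queue", dev);
    drm_sched_entity_init(&exec_queue->entity, DRM_SCHED_PRIORITY_NORMAL,
                          &sched, 1, NULL);

With a sched_list of one, the entity's jobs can only ever flow through this
scheduler, so the TDR only ever sees that one entity's submission order.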
>
> 2. In Xe, submissions are done by programming a ring buffer (circular
> buffer). A drm_gpu_scheduler provides a limit on the number of in-flight
> jobs; if that limit is set to RING_SIZE / MAX_SIZE_PER_JOB, we get flow
> control on the ring for free.
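
In other words (sketch only; sizes made up):

    #define RING_SIZE        SZ_16K /* hypothetical */
    #define MAX_SIZE_PER_JOB SZ_1K  /* hypothetical */

    /* At most 16 jobs in flight; the ring can never be overrun. */
    u32 hw_submission = RING_SIZE / MAX_SIZE_PER_JOB;

    drm_sched_init(sched, &my_sched_ops, NULL, hw_submission, 0,
                   timeout, NULL, NULL, "ring", dev);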
>
> A problem with this design is that a drm_gpu_scheduler currently uses a
> kthread for submission / job cleanup. This doesn't scale if a large
> number of drm_gpu_scheduler are used. To work around the scaling issue,
> use a worker rather than a kthread for submission / job cleanup.
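
With the new submit_wq parameter (see drm_sched_init() below), many
schedulers can also share a single workqueue instead of allocating one
each. A sketch with hypothetical driver names; note that a shared
*ordered* workqueue serializes submission across every scheduler using it:

    struct workqueue_struct *submit_wq =
            alloc_ordered_workqueue("drv-submit", 0);

    for (i = 0; i < num_queues; i++)
            drm_sched_init(&q[i]->sched, &my_sched_ops, submit_wq,
                           hw_submission, 0, timeout, NULL, NULL,
                           q[i]->name, dev);

Passing NULL instead makes drm_sched_init() allocate a per-scheduler
ordered workqueue (the v5 default noted below).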
>
> v2:
> - (Rob Clark) Fix msm build
> - Pass in run work queue
> v3:
> - (Boris) don't have loop in worker
> v4:
> - (Tvrtko) break out submit ready, stop, start helpers into own patch
> v5:
> - (Boris) default to ordered work queue
> v6:
> - (Luben / checkpatch) fix alignment in msm_ringbuffer.c
> - (Luben) s/drm_sched_submit_queue/drm_sched_wqueue_enqueue
> - (Luben) Update comment for drm_sched_wqueue_enqueue
> - (Luben) Positive check for submit_wq in drm_sched_init
> - (Luben) s/alloc_submit_wq/own_submit_wq
>
> Signed-off-by: Matthew Brost <matthew.brost at intel.com>
Reviewed-by: Luben Tuikov <luben.tuikov at amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +-
> drivers/gpu/drm/etnaviv/etnaviv_sched.c | 2 +-
> drivers/gpu/drm/lima/lima_sched.c | 2 +-
> drivers/gpu/drm/msm/msm_ringbuffer.c | 7 +-
> drivers/gpu/drm/nouveau/nouveau_sched.c | 2 +-
> drivers/gpu/drm/panfrost/panfrost_job.c | 2 +-
> drivers/gpu/drm/scheduler/sched_main.c | 118 ++++++++++-----------
> drivers/gpu/drm/v3d/v3d_sched.c | 10 +-
> include/drm/gpu_scheduler.h | 14 ++-
> 9 files changed, 82 insertions(+), 77 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index b1aafe815f28..b54c4d771104 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -2279,7 +2279,7 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
> break;
> }
>
> - r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
> + r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, NULL,
> ring->num_hw_submission, 0,
> timeout, adev->reset_domain->wq,
> ring->sched_score, ring->name,
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
> index 345fec6cb1a4..618a804ddc34 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
> @@ -134,7 +134,7 @@ int etnaviv_sched_init(struct etnaviv_gpu *gpu)
> {
> int ret;
>
> - ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
> + ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops, NULL,
> etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
> msecs_to_jiffies(500), NULL, NULL,
> dev_name(gpu->dev), gpu->dev);
> diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
> index ffd91a5ee299..8d858aed0e56 100644
> --- a/drivers/gpu/drm/lima/lima_sched.c
> +++ b/drivers/gpu/drm/lima/lima_sched.c
> @@ -488,7 +488,7 @@ int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
>
> INIT_WORK(&pipe->recover_work, lima_sched_recover_work);
>
> - return drm_sched_init(&pipe->base, &lima_sched_ops, 1,
> + return drm_sched_init(&pipe->base, &lima_sched_ops, NULL, 1,
> lima_job_hang_limit,
> msecs_to_jiffies(timeout), NULL,
> NULL, name, pipe->ldev->dev);
> diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
> index 40c0bc35a44c..1097f8e93d6b 100644
> --- a/drivers/gpu/drm/msm/msm_ringbuffer.c
> +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
> @@ -94,9 +94,10 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
> /* currently managing hangcheck ourselves: */
> sched_timeout = MAX_SCHEDULE_TIMEOUT;
>
> - ret = drm_sched_init(&ring->sched, &msm_sched_ops,
> - num_hw_submissions, 0, sched_timeout,
> - NULL, NULL, to_msm_bo(ring->bo)->name, gpu->dev->dev);
> + ret = drm_sched_init(&ring->sched, &msm_sched_ops, NULL,
> + num_hw_submissions, 0, sched_timeout,
> + NULL, NULL, to_msm_bo(ring->bo)->name,
> + gpu->dev->dev);
> if (ret) {
> goto fail;
> }
> diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c
> index 3b7ea5221226..4c959dec42b3 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_sched.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_sched.c
> @@ -435,7 +435,7 @@ int nouveau_sched_init(struct nouveau_drm *drm)
> if (!drm->sched_wq)
> return -ENOMEM;
>
> - return drm_sched_init(sched, &nouveau_sched_ops,
> + return drm_sched_init(sched, &nouveau_sched_ops, NULL,
> NOUVEAU_SCHED_HW_SUBMISSIONS, 0, job_hang_limit,
> NULL, NULL, "nouveau_sched", drm->dev->dev);
> }
> diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
> index fb16de2d0420..934b7b930c76 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_job.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_job.c
> @@ -852,7 +852,7 @@ int panfrost_job_init(struct panfrost_device *pfdev)
> js->queue[j].fence_context = dma_fence_context_alloc(1);
>
> ret = drm_sched_init(&js->queue[j].sched,
> - &panfrost_sched_ops,
> + &panfrost_sched_ops, NULL,
> nentries, 0,
> msecs_to_jiffies(JOB_TIMEOUT_MS),
> pfdev->reset.wq,
> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> index 6f2f7dd4ba0a..8b1d52cff1e9 100644
> --- a/drivers/gpu/drm/scheduler/sched_main.c
> +++ b/drivers/gpu/drm/scheduler/sched_main.c
> @@ -48,7 +48,6 @@
> * through the jobs entity pointer.
> */
>
> -#include <linux/kthread.h>
> #include <linux/wait.h>
> #include <linux/sched.h>
> #include <linux/completion.h>
> @@ -256,6 +255,16 @@ drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
> return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL;
> }
>
> +/**
> + * drm_sched_wqueue_enqueue - enqueue scheduler submission
> + * @sched: scheduler instance
> + */
> +static void drm_sched_wqueue_enqueue(struct drm_gpu_scheduler *sched)
> +{
> + if (!READ_ONCE(sched->pause_submit))
> + queue_work(sched->submit_wq, &sched->work_submit);
> +}
> +
> /**
> * drm_sched_job_done - complete a job
> * @s_job: pointer to the job which is done
> @@ -275,7 +284,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
> dma_fence_get(&s_fence->finished);
> drm_sched_fence_finished(s_fence, result);
> dma_fence_put(&s_fence->finished);
> - wake_up_interruptible(&sched->wake_up_worker);
> + drm_sched_wqueue_enqueue(sched);
> }
>
> /**
> @@ -868,7 +877,7 @@ static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched)
> void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched)
> {
> if (drm_sched_can_queue(sched))
> - wake_up_interruptible(&sched->wake_up_worker);
> + drm_sched_wqueue_enqueue(sched);
> }
>
> /**
> @@ -978,61 +987,42 @@ drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
> }
> EXPORT_SYMBOL(drm_sched_pick_best);
>
> -/**
> - * drm_sched_blocked - check if the scheduler is blocked
> - *
> - * @sched: scheduler instance
> - *
> - * Returns true if blocked, otherwise false.
> - */
> -static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
> -{
> - if (kthread_should_park()) {
> - kthread_parkme();
> - return true;
> - }
> -
> - return false;
> -}
> -
> /**
> * drm_sched_main - main scheduler thread
> *
> * @param: scheduler instance
> - *
> - * Returns 0.
> */
> -static int drm_sched_main(void *param)
> +static void drm_sched_main(struct work_struct *w)
> {
> - struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
> + struct drm_gpu_scheduler *sched =
> + container_of(w, struct drm_gpu_scheduler, work_submit);
> + struct drm_sched_entity *entity;
> + struct drm_sched_job *cleanup_job;
> int r;
>
> - sched_set_fifo_low(current);
> + if (READ_ONCE(sched->pause_submit))
> + return;
>
> - while (!kthread_should_stop()) {
> - struct drm_sched_entity *entity = NULL;
> - struct drm_sched_fence *s_fence;
> - struct drm_sched_job *sched_job;
> - struct dma_fence *fence;
> - struct drm_sched_job *cleanup_job = NULL;
> + cleanup_job = drm_sched_get_cleanup_job(sched);
> + entity = drm_sched_select_entity(sched);
>
> - wait_event_interruptible(sched->wake_up_worker,
> - (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
> - (!drm_sched_blocked(sched) &&
> - (entity = drm_sched_select_entity(sched))) ||
> - kthread_should_stop());
> + if (!entity && !cleanup_job)
> + return; /* No more work */
>
> - if (cleanup_job)
> - sched->ops->free_job(cleanup_job);
> + if (cleanup_job)
> + sched->ops->free_job(cleanup_job);
>
> - if (!entity)
> - continue;
> + if (entity) {
> + struct dma_fence *fence;
> + struct drm_sched_fence *s_fence;
> + struct drm_sched_job *sched_job;
>
> sched_job = drm_sched_entity_pop_job(entity);
> -
> if (!sched_job) {
> complete_all(&entity->entity_idle);
> - continue;
> + if (!cleanup_job)
> + return; /* No more work */
> + goto again;
> }
>
> s_fence = sched_job->s_fence;
> @@ -1063,7 +1053,9 @@ static int drm_sched_main(void *param)
>
> wake_up(&sched->job_scheduled);
> }
> - return 0;
> +
> +again:
> + drm_sched_wqueue_enqueue(sched);
> }
>
> /**
> @@ -1071,6 +1063,8 @@ static int drm_sched_main(void *param)
> *
> * @sched: scheduler instance
> * @ops: backend operations for this scheduler
> + * @submit_wq: workqueue to use for submission. If NULL, an ordered wq is
> + * allocated and used
> * @hw_submission: number of hw submissions that can be in flight
> * @hang_limit: number of times to allow a job to hang before dropping it
> * @timeout: timeout value in jiffies for the scheduler
> @@ -1084,14 +1078,25 @@ static int drm_sched_main(void *param)
> */
> int drm_sched_init(struct drm_gpu_scheduler *sched,
> const struct drm_sched_backend_ops *ops,
> + struct workqueue_struct *submit_wq,
> unsigned hw_submission, unsigned hang_limit,
> long timeout, struct workqueue_struct *timeout_wq,
> atomic_t *score, const char *name, struct device *dev)
> {
> - int i, ret;
> + int i;
> sched->ops = ops;
> sched->hw_submission_limit = hw_submission;
> sched->name = name;
> + if (submit_wq) {
> + sched->submit_wq = submit_wq;
> + sched->own_submit_wq = false;
> + } else {
> + sched->submit_wq = alloc_ordered_workqueue(name, 0);
> + if (!sched->submit_wq)
> + return -ENOMEM;
> +
> + sched->own_submit_wq = true;
> + }
> sched->timeout = timeout;
> sched->timeout_wq = timeout_wq ? : system_wq;
> sched->hang_limit = hang_limit;
> @@ -1100,23 +1105,15 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
> for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
> drm_sched_rq_init(sched, &sched->sched_rq[i]);
>
> - init_waitqueue_head(&sched->wake_up_worker);
> init_waitqueue_head(&sched->job_scheduled);
> INIT_LIST_HEAD(&sched->pending_list);
> spin_lock_init(&sched->job_list_lock);
> atomic_set(&sched->hw_rq_count, 0);
> INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
> + INIT_WORK(&sched->work_submit, drm_sched_main);
> atomic_set(&sched->_score, 0);
> atomic64_set(&sched->job_id_count, 0);
> -
> - /* Each scheduler will run on a seperate kernel thread */
> - sched->thread = kthread_run(drm_sched_main, sched, sched->name);
> - if (IS_ERR(sched->thread)) {
> - ret = PTR_ERR(sched->thread);
> - sched->thread = NULL;
> - DRM_DEV_ERROR(sched->dev, "Failed to create scheduler for %s.\n", name);
> - return ret;
> - }
> + sched->pause_submit = false;
>
> sched->ready = true;
> return 0;
> @@ -1135,8 +1132,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
> struct drm_sched_entity *s_entity;
> int i;
>
> - if (sched->thread)
> - kthread_stop(sched->thread);
> + drm_sched_wqueue_stop(sched);
>
> for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
> struct drm_sched_rq *rq = &sched->sched_rq[i];
> @@ -1159,6 +1155,8 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
> /* Confirm no work left behind accessing device structures */
> cancel_delayed_work_sync(&sched->work_tdr);
>
> + if (sched->own_submit_wq)
> + destroy_workqueue(sched->submit_wq);
> sched->ready = false;
> }
> EXPORT_SYMBOL(drm_sched_fini);
> @@ -1216,7 +1214,7 @@ EXPORT_SYMBOL(drm_sched_increase_karma);
> */
> bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched)
> {
> - return !!sched->thread;
> + return sched->ready;
> }
> EXPORT_SYMBOL(drm_sched_wqueue_ready);
>
> @@ -1227,7 +1225,8 @@ EXPORT_SYMBOL(drm_sched_wqueue_ready);
> */
> void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched)
> {
> - kthread_park(sched->thread);
> + WRITE_ONCE(sched->pause_submit, true);
> + cancel_work_sync(&sched->work_submit);
> }
> EXPORT_SYMBOL(drm_sched_wqueue_stop);
>
> @@ -1238,6 +1237,7 @@ EXPORT_SYMBOL(drm_sched_wqueue_stop);
> */
> void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched)
> {
> - kthread_unpark(sched->thread);
> + WRITE_ONCE(sched->pause_submit, false);
> + queue_work(sched->submit_wq, &sched->work_submit);
> }
> EXPORT_SYMBOL(drm_sched_wqueue_start);
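
A nice property of the pause_submit flag above: stopping and restarting
submission is now self-contained, e.g. around a reset (sketch):

    drm_sched_wqueue_stop(sched);   /* pause_submit = true; cancel_work_sync() */
    /* reset the hardware, fail or resubmit pending jobs */
    drm_sched_wqueue_start(sched);  /* pause_submit = false; queue_work() */

The READ_ONCE() in drm_sched_wqueue_enqueue() pairs with these
WRITE_ONCE() calls, so a stopped scheduler cannot re-queue itself from
drm_sched_job_done().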
> diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
> index 06238e6d7f5c..38e092ea41e6 100644
> --- a/drivers/gpu/drm/v3d/v3d_sched.c
> +++ b/drivers/gpu/drm/v3d/v3d_sched.c
> @@ -388,7 +388,7 @@ v3d_sched_init(struct v3d_dev *v3d)
> int ret;
>
> ret = drm_sched_init(&v3d->queue[V3D_BIN].sched,
> - &v3d_bin_sched_ops,
> + &v3d_bin_sched_ops, NULL,
> hw_jobs_limit, job_hang_limit,
> msecs_to_jiffies(hang_limit_ms), NULL,
> NULL, "v3d_bin", v3d->drm.dev);
> @@ -396,7 +396,7 @@ v3d_sched_init(struct v3d_dev *v3d)
> return ret;
>
> ret = drm_sched_init(&v3d->queue[V3D_RENDER].sched,
> - &v3d_render_sched_ops,
> + &v3d_render_sched_ops, NULL,
> hw_jobs_limit, job_hang_limit,
> msecs_to_jiffies(hang_limit_ms), NULL,
> NULL, "v3d_render", v3d->drm.dev);
> @@ -404,7 +404,7 @@ v3d_sched_init(struct v3d_dev *v3d)
> goto fail;
>
> ret = drm_sched_init(&v3d->queue[V3D_TFU].sched,
> - &v3d_tfu_sched_ops,
> + &v3d_tfu_sched_ops, NULL,
> hw_jobs_limit, job_hang_limit,
> msecs_to_jiffies(hang_limit_ms), NULL,
> NULL, "v3d_tfu", v3d->drm.dev);
> @@ -413,7 +413,7 @@ v3d_sched_init(struct v3d_dev *v3d)
>
> if (v3d_has_csd(v3d)) {
> ret = drm_sched_init(&v3d->queue[V3D_CSD].sched,
> - &v3d_csd_sched_ops,
> + &v3d_csd_sched_ops, NULL,
> hw_jobs_limit, job_hang_limit,
> msecs_to_jiffies(hang_limit_ms), NULL,
> NULL, "v3d_csd", v3d->drm.dev);
> @@ -421,7 +421,7 @@ v3d_sched_init(struct v3d_dev *v3d)
> goto fail;
>
> ret = drm_sched_init(&v3d->queue[V3D_CACHE_CLEAN].sched,
> - &v3d_cache_clean_sched_ops,
> + &v3d_cache_clean_sched_ops, NULL,
> hw_jobs_limit, job_hang_limit,
> msecs_to_jiffies(hang_limit_ms), NULL,
> NULL, "v3d_cache_clean", v3d->drm.dev);
> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
> index 38578fe74573..211bd3cdabdc 100644
> --- a/include/drm/gpu_scheduler.h
> +++ b/include/drm/gpu_scheduler.h
> @@ -473,17 +473,16 @@ struct drm_sched_backend_ops {
> * @timeout: the time after which a job is removed from the scheduler.
> * @name: name of the ring for which this scheduler is being used.
> * @sched_rq: priority wise array of run queues.
> - * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
> - * is ready to be scheduled.
> * @job_scheduled: once @drm_sched_entity_do_release is called the scheduler
> * waits on this wait queue until all the scheduled jobs are
> * finished.
> * @hw_rq_count: the number of jobs currently in the hardware queue.
> * @job_id_count: used to assign unique id to the each job.
> + * @submit_wq: workqueue used to queue @work_submit
> * @timeout_wq: workqueue used to queue @work_tdr
> + * @work_submit: schedules jobs and cleans up entities
> * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
> * timeout interval is over.
> - * @thread: the kthread on which the scheduler which run.
> * @pending_list: the list of jobs which are currently in the job queue.
> * @job_list_lock: lock to protect the pending_list.
> * @hang_limit: once the hangs by a job crosses this limit then it is marked
> @@ -492,6 +491,8 @@ struct drm_sched_backend_ops {
> * @_score: score used when the driver doesn't provide one
> * @ready: marks if the underlying HW is ready to work
> * @free_guilty: A hit to time out handler to free the guilty job.
> + * @pause_submit: pause queuing of @work_submit on @submit_wq
> + * @own_submit_wq: scheduler owns allocation of @submit_wq
> * @dev: system &struct device
> *
> * One scheduler is implemented for each hardware ring.
> @@ -502,13 +503,13 @@ struct drm_gpu_scheduler {
> long timeout;
> const char *name;
> struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_COUNT];
> - wait_queue_head_t wake_up_worker;
> wait_queue_head_t job_scheduled;
> atomic_t hw_rq_count;
> atomic64_t job_id_count;
> + struct workqueue_struct *submit_wq;
> struct workqueue_struct *timeout_wq;
> + struct work_struct work_submit;
> struct delayed_work work_tdr;
> - struct task_struct *thread;
> struct list_head pending_list;
> spinlock_t job_list_lock;
> int hang_limit;
> @@ -516,11 +517,14 @@ struct drm_gpu_scheduler {
> atomic_t _score;
> bool ready;
> bool free_guilty;
> + bool pause_submit;
> + bool own_submit_wq;
> struct device *dev;
> };
>
> int drm_sched_init(struct drm_gpu_scheduler *sched,
> const struct drm_sched_backend_ops *ops,
> + struct workqueue_struct *submit_wq,
> uint32_t hw_submission, unsigned hang_limit,
> long timeout, struct workqueue_struct *timeout_wq,
> atomic_t *score, const char *name, struct device *dev);
--
Regards,
Luben