[PATCH v2 4/9] drm/sched: Split free_job into own work item
Christian König
christian.koenig at amd.com
Fri Aug 25 13:45:49 UTC 2023
On 25.08.23 at 15:36, Matthew Brost wrote:
> On Fri, Aug 25, 2023 at 10:02:32AM +0200, Christian König wrote:
>> On 25.08.23 at 04:58, Matthew Brost wrote:
>>> On Fri, Aug 25, 2023 at 01:04:10AM +0200, Danilo Krummrich wrote:
>>>> On Thu, Aug 10, 2023 at 07:31:32PM -0700, Matthew Brost wrote:
>>>>> Rather than calling free_job and run_job in the same work item, have a
>>>>> dedicated work item for each. This aligns with the design and intended
>>>>> use of work queues.
>>>>>
>>>>> Signed-off-by: Matthew Brost <matthew.brost at intel.com>
>>>>> ---
>>>>> drivers/gpu/drm/scheduler/sched_main.c | 137 ++++++++++++++++++-------
>>>>> include/drm/gpu_scheduler.h | 8 +-
>>>>> 2 files changed, 106 insertions(+), 39 deletions(-)
>>>>>
>>>>> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
>>>>> index cede47afc800..b67469eac179 100644
>>>>> --- a/drivers/gpu/drm/scheduler/sched_main.c
>>>>> +++ b/drivers/gpu/drm/scheduler/sched_main.c
>>>>> @@ -213,11 +213,12 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
>>>>> * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
>>>>> *
>>>>> * @rq: scheduler run queue to check.
>>>>> + * @dequeue: dequeue selected entity
>>>>> *
>>>>> * Try to find a ready entity, returns NULL if none found.
>>>>> */
>>>>> static struct drm_sched_entity *
>>>>> -drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
>>>>> +drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq, bool dequeue)
>>>>> {
>>>>> struct drm_sched_entity *entity;
>>>>> @@ -227,8 +228,10 @@ drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
>>>>> if (entity) {
>>>>> list_for_each_entry_continue(entity, &rq->entities, list) {
>>>>> if (drm_sched_entity_is_ready(entity)) {
>>>>> - rq->current_entity = entity;
>>>>> - reinit_completion(&entity->entity_idle);
>>>>> + if (dequeue) {
>>>>> + rq->current_entity = entity;
>>>>> + reinit_completion(&entity->entity_idle);
>>>>> + }
>>>>> spin_unlock(&rq->lock);
>>>>> return entity;
>>>>> }
>>>>> @@ -238,8 +241,10 @@ drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
>>>>> list_for_each_entry(entity, &rq->entities, list) {
>>>>> if (drm_sched_entity_is_ready(entity)) {
>>>>> - rq->current_entity = entity;
>>>>> - reinit_completion(&entity->entity_idle);
>>>>> + if (dequeue) {
>>>>> + rq->current_entity = entity;
>>>>> + reinit_completion(&entity->entity_idle);
>>>>> + }
>>>>> spin_unlock(&rq->lock);
>>>>> return entity;
>>>>> }
>>>>> @@ -257,11 +262,12 @@ drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
>>>>> * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
>>>>> *
>>>>> * @rq: scheduler run queue to check.
>>>>> + * @dequeue: dequeue selected entity
>>>>> *
>>>>> * Find oldest waiting ready entity, returns NULL if none found.
>>>>> */
>>>>> static struct drm_sched_entity *
>>>>> -drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
>>>>> +drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq, bool dequeue)
>>>>> {
>>>>> struct rb_node *rb;
>>>>> @@ -271,8 +277,10 @@ drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
>>>>> entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
>>>>> if (drm_sched_entity_is_ready(entity)) {
>>>>> - rq->current_entity = entity;
>>>>> - reinit_completion(&entity->entity_idle);
>>>>> + if (dequeue) {
>>>>> + rq->current_entity = entity;
>>>>> + reinit_completion(&entity->entity_idle);
>>>>> + }
>>>>> break;
>>>>> }
>>>>> }
>>>>> @@ -282,13 +290,54 @@ drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
>>>>> }
>>>>> /**
>>>>> - * drm_sched_submit_queue - scheduler queue submission
>>>>> + * drm_sched_run_job_queue - queue job submission
>>>>> * @sched: scheduler instance
>>>>> */
>>>>> -static void drm_sched_submit_queue(struct drm_gpu_scheduler *sched)
>>>>> +static void drm_sched_run_job_queue(struct drm_gpu_scheduler *sched)
>>>>> {
>>>>> if (!READ_ONCE(sched->pause_submit))
>>>>> - queue_work(sched->submit_wq, &sched->work_submit);
>>>>> + queue_work(sched->submit_wq, &sched->work_run_job);
>>>>> +}
>>>>> +
>>>>> +static struct drm_sched_entity *
>>>>> +drm_sched_select_entity(struct drm_gpu_scheduler *sched, bool dequeue);
>>>>> +
>>>>> +/**
>>>>> + * drm_sched_run_job_queue_if_ready - queue job submission if ready
>>>>> + * @sched: scheduler instance
>>>>> + */
>>>>> +static void drm_sched_run_job_queue_if_ready(struct drm_gpu_scheduler *sched)
>>>>> +{
>>>>> + if (drm_sched_select_entity(sched, false))
>>>>> + drm_sched_run_job_queue(sched);
>>>>> +}
>>>>> +
>>>>> +/**
>>>>> + * drm_sched_free_job_queue - queue free job
>>>>> + *
>>>>> + * @sched: scheduler instance to queue free job
>>>>> + */
>>>>> +static void drm_sched_free_job_queue(struct drm_gpu_scheduler *sched)
>>>>> +{
>>>>> + if (!READ_ONCE(sched->pause_submit))
>>>>> + queue_work(sched->submit_wq, &sched->work_free_job);
>>>>> +}
>>>>> +
>>>>> +/**
>>>>> + * drm_sched_free_job_queue_if_ready - queue free job if ready
>>>>> + *
>>>>> + * @sched: scheduler instance to queue free job
>>>>> + */
>>>>> +static void drm_sched_free_job_queue_if_ready(struct drm_gpu_scheduler *sched)
>>>>> +{
>>>>> + struct drm_sched_job *job;
>>>>> +
>>>>> + spin_lock(&sched->job_list_lock);
>>>>> + job = list_first_entry_or_null(&sched->pending_list,
>>>>> + struct drm_sched_job, list);
>>>>> + if (job && dma_fence_is_signaled(&job->s_fence->finished))
>>>>> + drm_sched_free_job_queue(sched);
>>>>> + spin_unlock(&sched->job_list_lock);
>>>>> }
>>>>> /**
>>>>> @@ -310,7 +359,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
>>>>> dma_fence_get(&s_fence->finished);
>>>>> drm_sched_fence_finished(s_fence, result);
>>>>> dma_fence_put(&s_fence->finished);
>>>>> - drm_sched_submit_queue(sched);
>>>>> + drm_sched_free_job_queue(sched);
>>>>> }
>>>>> /**
>>>>> @@ -906,18 +955,19 @@ static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched)
>>>>> void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched)
>>>>> {
>>>>> if (drm_sched_can_queue(sched))
>>>>> - drm_sched_submit_queue(sched);
>>>>> + drm_sched_run_job_queue(sched);
>>>>> }
>>>>> /**
>>>>> * drm_sched_select_entity - Select next entity to process
>>>>> *
>>>>> * @sched: scheduler instance
>>>>> + * @dequeue: dequeue selected entity
>>>>> *
>>>>> * Returns the entity to process or NULL if none are found.
>>>>> */
>>>>> static struct drm_sched_entity *
>>>>> -drm_sched_select_entity(struct drm_gpu_scheduler *sched)
>>>>> +drm_sched_select_entity(struct drm_gpu_scheduler *sched, bool dequeue)
>>>>> {
>>>>> struct drm_sched_entity *entity;
>>>>> int i;
>>>>> @@ -935,8 +985,10 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
>>>>> /* Kernel run queue has higher priority than normal run queue*/
>>>>> for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
>>>>> entity = sched->sched_policy == DRM_SCHED_POLICY_FIFO ?
>>>>> - drm_sched_rq_select_entity_fifo(&sched->sched_rq[i]) :
>>>>> - drm_sched_rq_select_entity_rr(&sched->sched_rq[i]);
>>>>> + drm_sched_rq_select_entity_fifo(&sched->sched_rq[i],
>>>>> + dequeue) :
>>>>> + drm_sched_rq_select_entity_rr(&sched->sched_rq[i],
>>>>> + dequeue);
>>>>> if (entity)
>>>>> break;
>>>>> }
>>>>> @@ -1024,30 +1076,44 @@ drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
>>>>> EXPORT_SYMBOL(drm_sched_pick_best);
>>>>> /**
>>>>> - * drm_sched_main - main scheduler thread
>>>>> + * drm_sched_free_job_work - worker to call free_job
>>>>> *
>>>>> - * @param: scheduler instance
>>>>> + * @w: free job work
>>>>> */
>>>>> -static void drm_sched_main(struct work_struct *w)
>>>>> +static void drm_sched_free_job_work(struct work_struct *w)
>>>>> {
>>>>> struct drm_gpu_scheduler *sched =
>>>>> - container_of(w, struct drm_gpu_scheduler, work_submit);
>>>>> - struct drm_sched_entity *entity;
>>>>> + container_of(w, struct drm_gpu_scheduler, work_free_job);
>>>>> struct drm_sched_job *cleanup_job;
>>>>> - int r;
>>>>> if (READ_ONCE(sched->pause_submit))
>>>>> return;
>>>>> cleanup_job = drm_sched_get_cleanup_job(sched);
>>>> I tried this patch with Nouveau and found a race condition:
>>>>
>>>> In drm_sched_run_job_work() the job is added to the pending_list via
>>>> drm_sched_job_begin(), then the run_job() callback is called and the scheduled
>>>> fence is signaled.
>>>>
>>>> However, in parallel drm_sched_get_cleanup_job() might be called from
>>>> drm_sched_free_job_work(), which picks the first job from the pending_list
>>>> and sets the scheduled fence's timestamp field of the next job on the
>>>> pending_list.
>> Well, why can this happen in parallel? Either the work items are scheduled to
>> a single-threaded work queue or you have protected the pending list with
>> some locks.
>>
> Xe uses a single-threaded work queue, Nouveau does not (desired
> behavior).
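
(As an aside: a driver that wants run_job and free_job serialized even after this
split only needs to hand the scheduler an ordered workqueue. A minimal sketch,
assuming the submit_wq parameter this series adds to drm_sched_init(); the names
and exact argument list here are illustrative, not the series' final API:)

	/* Hypothetical driver init: an ordered workqueue runs at most one work
	 * item at a time, so work_run_job and work_free_job can never execute
	 * concurrently -- effectively the single-threaded setup Xe uses.
	 */
	struct workqueue_struct *submit_wq;

	submit_wq = alloc_ordered_workqueue("my-drm-sched", 0);
	if (!submit_wq)
		return -ENOMEM;

	ret = drm_sched_init(&sched, &my_sched_ops, submit_wq,
			     /* ... remaining init arguments ... */);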
>
> The list of pending jobs is protected by a lock (so that part is safe); the race is:
>
> add job to pending list
> run_job
> signal scheduled fence
>
> dequeue from pending list
> free_job
> update timestamp
>
> Once a job is on the pending list its timestamp can be accessed, which can
> blow up if the scheduled fence isn't signaled yet, or, more specifically,
> unless DMA_FENCE_FLAG_TIMESTAMP_BIT is set.
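
(For context on why touching the timestamp of an unsignaled fence is dangerous:
in current kernels the timestamp only becomes valid once the fence has signaled,
because it shares storage with the fence's callback list. Abridged from
include/linux/dma-fence.h:)

	struct dma_fence {
		...
		union {
			struct list_head cb_list;  /* in use until the fence signals */
			/* @timestamp replaced by @rcu on dma_fence_release() */
			ktime_t timestamp;         /* valid once DMA_FENCE_FLAG_TIMESTAMP_BIT is set */
			struct rcu_head rcu;
		};
		...
	};

Writing next->s_fence->scheduled.timestamp before that fence has signaled
therefore scribbles over a live callback list.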
Ah, that problem again. No, that is actually quite harmless.
You just need to double-check whether DMA_FENCE_FLAG_TIMESTAMP_BIT is
already set and, if it's not, do nothing.
Regards,
Christian.
> Logically it makes sense for the
> job to be on the pending list before run_job and to signal the scheduled
> fence after run_job, so I think we need to live with this race.
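
(That ordering, simplified from the run_job worker in this series -- not the
exact code -- is roughly:)

	drm_sched_job_begin(sched_job);           /* job goes onto pending_list    */
	fence = sched->ops->run_job(sched_job);   /* job is handed to the hardware */
	drm_sched_fence_scheduled(s_fence);       /* scheduled fence signals; its
	                                           * timestamp becomes valid here  */

The free_job worker can observe the job between the first and the last step,
which is exactly the window described above.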
>
>> Just moving free_job into a separate work item without such precautions
>> won't work, for quite a few other reasons as well.
>>
> Yes, free_job might not be safe to run in parallel with run_job,
> depending on the driver vfuncs. I'll mention this in the cover letter.
>
> Certainly this should be safe in the scheduler code itself, though, and I
> think it will be after fixing this.
>
> Matt
>
>>>> The job can be on the pending_list, but the scheduled fence might not yet be
>>>> signaled. The call to actually signal the fence will subsequently fault because
>>>> it will try to dereference the timestamp.
>>>>
>>>> I'm not sure what the best way to fix this is; maybe it's enough to re-order
>>>> signalling the scheduled fence and adding the job to the pending_list. Not sure
>>>> whether this has other implications, though.
>>>>
>>> We really want the job on the pending list before calling run_job.
>>>
>>> I'm thinking we just delete the updating of the timestamp; I'm not sure why
>>> it is useful.
>> This is used for calculating how long each job has spent on the hw, so big
>> NAK to deleting this.
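
(Roughly, once both scheduler fences of a job have signaled, the time spent on
the hw is the span between them; purely illustrative:)

	/* Illustrative only: per-job hardware time derived from the two
	 * scheduler fences. Propagating the previous job's finished timestamp
	 * into the next job's scheduled fence keeps time spent waiting behind
	 * the previous job out of this span.
	 */
	ktime_t time_on_hw = ktime_sub(job->s_fence->finished.timestamp,
				       job->s_fence->scheduled.timestamp);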
>>
> Ah, I see that AMDGPU uses this. Previously I had only checked the scheduler
> code.
>
> The below patch should work just fine then.
>
> Matt
>
>> Regards,
>> Christian.
>>
>>> Or we could do something like this, where we try to update the timestamp;
>>> if we can't, the run_job worker will do it anyway:
>>>
>>> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
>>> index 67e0fb6e7d18..54bd3e88f139 100644
>>> --- a/drivers/gpu/drm/scheduler/sched_main.c
>>> +++ b/drivers/gpu/drm/scheduler/sched_main.c
>>> @@ -1074,8 +1074,10 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
>>> typeof(*next), list);
>>>
>>> if (next) {
>>> - next->s_fence->scheduled.timestamp =
>>> - job->s_fence->finished.timestamp;
>>> + if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
>>> + &next->s_fence->scheduled.flags))
>>> + next->s_fence->scheduled.timestamp =
>>> + job->s_fence->finished.timestamp;
>>> /* start TO timer for next job */
>>> drm_sched_start_timeout(sched);
>>> }
>>>
>>> I guess I'm leaning towards the latter option.
>>>
>>> Matt
>>>
>>>> - Danilo
>>>>
>>>>> - entity = drm_sched_select_entity(sched);
>>>>> + if (cleanup_job) {
>>>>> + sched->ops->free_job(cleanup_job);
>>>>> +
>>>>> + drm_sched_free_job_queue_if_ready(sched);
>>>>> + drm_sched_run_job_queue_if_ready(sched);
>>>>> + }
>>>>> +}
>>>>> - if (!entity && !cleanup_job)
>>>>> - return; /* No more work */
>>>>> +/**
>>>>> + * drm_sched_run_job_work - worker to call run_job
>>>>> + *
>>>>> + * @w: run job work
>>>>> + */
>>>>> +static void drm_sched_run_job_work(struct work_struct *w)
>>>>> +{
>>>>> + struct drm_gpu_scheduler *sched =
>>>>> + container_of(w, struct drm_gpu_scheduler, work_run_job);
>>>>> + struct drm_sched_entity *entity;
>>>>> + int r;
>>>>> - if (cleanup_job)
>>>>> - sched->ops->free_job(cleanup_job);
>>>>> + if (READ_ONCE(sched->pause_submit))
>>>>> + return;
>>>>> + entity = drm_sched_select_entity(sched, true);
>>>>> if (entity) {
>>>>> struct dma_fence *fence;
>>>>> struct drm_sched_fence *s_fence;
>>>>> @@ -1056,9 +1122,7 @@ static void drm_sched_main(struct work_struct *w)
>>>>> sched_job = drm_sched_entity_pop_job(entity);
>>>>> if (!sched_job) {
>>>>> complete_all(&entity->entity_idle);
>>>>> - if (!cleanup_job)
>>>>> - return; /* No more work */
>>>>> - goto again;
>>>>> + return; /* No more work */
>>>>> }
>>>>> s_fence = sched_job->s_fence;
>>>>> @@ -1088,10 +1152,8 @@ static void drm_sched_main(struct work_struct *w)
>>>>> }
>>>>> wake_up(&sched->job_scheduled);
>>>>> + drm_sched_run_job_queue_if_ready(sched);
>>>>> }
>>>>> -
>>>>> -again:
>>>>> - drm_sched_submit_queue(sched);
>>>>> }
>>>>> /**
>>>>> @@ -1150,7 +1212,8 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
>>>>> spin_lock_init(&sched->job_list_lock);
>>>>> atomic_set(&sched->hw_rq_count, 0);
>>>>> INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
>>>>> - INIT_WORK(&sched->work_submit, drm_sched_main);
>>>>> + INIT_WORK(&sched->work_run_job, drm_sched_run_job_work);
>>>>> + INIT_WORK(&sched->work_free_job, drm_sched_free_job_work);
>>>>> atomic_set(&sched->_score, 0);
>>>>> atomic64_set(&sched->job_id_count, 0);
>>>>> sched->pause_submit = false;
>>>>> @@ -1275,7 +1338,8 @@ EXPORT_SYMBOL(drm_sched_submit_ready);
>>>>> void drm_sched_submit_stop(struct drm_gpu_scheduler *sched)
>>>>> {
>>>>> WRITE_ONCE(sched->pause_submit, true);
>>>>> - cancel_work_sync(&sched->work_submit);
>>>>> + cancel_work_sync(&sched->work_run_job);
>>>>> + cancel_work_sync(&sched->work_free_job);
>>>>> }
>>>>> EXPORT_SYMBOL(drm_sched_submit_stop);
>>>>> @@ -1287,6 +1351,7 @@ EXPORT_SYMBOL(drm_sched_submit_stop);
>>>>> void drm_sched_submit_start(struct drm_gpu_scheduler *sched)
>>>>> {
>>>>> WRITE_ONCE(sched->pause_submit, false);
>>>>> - queue_work(sched->submit_wq, &sched->work_submit);
>>>>> + queue_work(sched->submit_wq, &sched->work_run_job);
>>>>> + queue_work(sched->submit_wq, &sched->work_free_job);
>>>>> }
>>>>> EXPORT_SYMBOL(drm_sched_submit_start);
>>>>> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
>>>>> index 04eec2d7635f..fbc083a92757 100644
>>>>> --- a/include/drm/gpu_scheduler.h
>>>>> +++ b/include/drm/gpu_scheduler.h
>>>>> @@ -487,9 +487,10 @@ struct drm_sched_backend_ops {
>>>>> * finished.
>>>>> * @hw_rq_count: the number of jobs currently in the hardware queue.
>>>>> * @job_id_count: used to assign unique id to the each job.
>>>>> - * @submit_wq: workqueue used to queue @work_submit
>>>>> + * @submit_wq: workqueue used to queue @work_run_job and @work_free_job
>>>>> * @timeout_wq: workqueue used to queue @work_tdr
>>>>> - * @work_submit: schedules jobs and cleans up entities
>>>>> + * @work_run_job: schedules jobs
>>>>> + * @work_free_job: cleans up jobs
>>>>> * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
>>>>> * timeout interval is over.
>>>>> * @pending_list: the list of jobs which are currently in the job queue.
>>>>> @@ -518,7 +519,8 @@ struct drm_gpu_scheduler {
>>>>> atomic64_t job_id_count;
>>>>> struct workqueue_struct *submit_wq;
>>>>> struct workqueue_struct *timeout_wq;
>>>>> - struct work_struct work_submit;
>>>>> + struct work_struct work_run_job;
>>>>> + struct work_struct work_free_job;
>>>>> struct delayed_work work_tdr;
>>>>> struct list_head pending_list;
>>>>> spinlock_t job_list_lock;
>>>>> --
>>>>> 2.34.1
>>>>>