[PATCH] drm/sched: Avoid double re-lock on the job free path

Maíra Canal mcanal at igalia.com
Wed Jul 16 14:30:35 UTC 2025


Hi Tvrtko,

On 16/07/25 10:49, Tvrtko Ursulin wrote:
> 
> On 16/07/2025 14:31, Maíra Canal wrote:
>> Hi Tvrtko,
>>
>> On 16/07/25 05:51, Tvrtko Ursulin wrote:
>>> Currently the job free work item will lock sched->job_list_lock first
>>> time to see if there are any jobs, free a single job, and then lock again
>>> to decide whether to re-queue itself if there are more finished jobs.
>>>
>>> Since drm_sched_get_finished_job() already looks at the second job in
>>> the queue we can simply add the signaled check and have it return the
>>> presence of more jobs to be freed to the caller. That way the work item
>>> does not have to lock the list again and repeat the signaled check.
>>>
>>> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at igalia.com>
>>> Cc: Christian König <christian.koenig at amd.com>
>>> Cc: Danilo Krummrich <dakr at kernel.org>
>>> Cc: Maíra Canal <mcanal at igalia.com>
>>> Cc: Matthew Brost <matthew.brost at intel.com>
>>> Cc: Philipp Stanner <phasta at kernel.org>
>>> ---
>>> v2:
>>>   * Improve commit text and kerneldoc. (Philipp)
>>>   * Rename run free work helper. (Philipp)
>>>
>>> v3:
>>>   * Rebase on top of Maira's changes.
>>> ---
>>>   drivers/gpu/drm/scheduler/sched_main.c | 53 ++++++++++----------------
>>>   1 file changed, 21 insertions(+), 32 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
>>> index e2cda28a1af4..5a550fd76bf0 100644
>>> --- a/drivers/gpu/drm/scheduler/sched_main.c
>>> +++ b/drivers/gpu/drm/scheduler/sched_main.c
>>> @@ -349,34 +349,13 @@ static void drm_sched_run_job_queue(struct drm_gpu_scheduler *sched)
>>>   }
>>>   /**
>>> - * __drm_sched_run_free_queue - enqueue free-job work
>>> - * @sched: scheduler instance
>>> - */
>>> -static void __drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
>>> -{
>>> -    if (!READ_ONCE(sched->pause_submit))
>>> -        queue_work(sched->submit_wq, &sched->work_free_job);
>>> -}
>>> -
>>> -/**
>>> - * drm_sched_run_free_queue - enqueue free-job work if ready
>>> + * drm_sched_run_free_queue - enqueue free-job work
>>>    * @sched: scheduler instance
>>>    */
>>>   static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
>>>   {
>>> -    struct drm_sched_job *job;
>>> -
>>> -    job = list_first_entry_or_null(&sched->pending_list,
>>> -                       struct drm_sched_job, list);
>>> -    if (job && dma_fence_is_signaled(&job->s_fence->finished))
>>> -        __drm_sched_run_free_queue(sched);
>>
>> I believe we'd still need this chunk for DRM_GPU_SCHED_STAT_NO_HANG
>> (check the comment in drm_sched_job_reinsert_on_false_timeout()). How
> 
> You mean the "is there a signaled job in the list check" is needed for 
> drm_sched_job_reinsert_on_false_timeout()? Hmm why? Worst case is a 
> false positive wakeup on the free worker, no?

Correct me if I'm mistaken, but we would also get a false-positive wake-up
on the run_job worker, which I believe could be problematic in the cases
where we skipped the reset because the job is still running.
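
To make the chain I'm worried about explicit, this is how I read the free
worker after this patch (copied from the hunk below, with a comment of mine
added; apologies if I'm misreading it):

	static void drm_sched_free_job_work(struct work_struct *w)
	{
		struct drm_gpu_scheduler *sched =
			container_of(w, struct drm_gpu_scheduler, work_free_job);
		struct drm_sched_job *job;
		bool have_more;

		job = drm_sched_get_finished_job(sched, &have_more);
		if (job) {
			sched->ops->free_job(job);
			if (have_more)
				drm_sched_run_free_queue(sched);
		}

		/*
		 * Runs even when no finished job was found, e.g. when a
		 * false timeout kicked us via drm_sched_run_free_queue(),
		 * so the run_job worker gets woken as well.
		 */
		drm_sched_run_job_queue(sched);
	}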

> 
>> about only deleting drm_sched_run_free_queue_unlocked() and keeping
>> __drm_sched_run_free_queue()?
> 
> You mean use __drm_sched_run_free_queue() from 
> drm_sched_job_reinsert_on_false_timeout()? That is the same as 
> drm_sched_run_free_queue() with this patch.

Sorry, I believe I didn't express myself clearly. I mean using
__drm_sched_run_free_queue() in drm_sched_free_job_work() and keeping
drm_sched_run_free_queue() in drm_sched_job_reinsert_on_false_timeout().
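
Roughly like this, reusing the two helpers as they already exist in the
current code and only changing who calls what (untested sketch, locking as
with the existing callers):

	/* Queue the free-job work unconditionally. */
	static void __drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
	{
		if (!READ_ONCE(sched->pause_submit))
			queue_work(sched->submit_wq, &sched->work_free_job);
	}

	/*
	 * Queue the free-job work only if the head of pending_list has
	 * already signaled. This would stay the helper used by
	 * drm_sched_job_reinsert_on_false_timeout(), so a false timeout
	 * does not wake the workers when nothing is actually finished,
	 * while drm_sched_free_job_work() and drm_sched_job_done() would
	 * call __drm_sched_run_free_queue() directly.
	 */
	static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
	{
		struct drm_sched_job *job;

		job = list_first_entry_or_null(&sched->pending_list,
					       struct drm_sched_job, list);
		if (job && dma_fence_is_signaled(&job->s_fence->finished))
			__drm_sched_run_free_queue(sched);
	}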

Best Regards,
- Maíra

> 
> Regards,
> 
> Tvrtko
> 
>>> -}
>>> -
>>> -static void drm_sched_run_free_queue_unlocked(struct drm_gpu_scheduler *sched)
>>> -{
>>> -    spin_lock(&sched->job_list_lock);
>>> -    drm_sched_run_free_queue(sched);
>>> -    spin_unlock(&sched->job_list_lock);
>>> +    if (!READ_ONCE(sched->pause_submit))
>>> +        queue_work(sched->submit_wq, &sched->work_free_job);
>>>   }
>>>   /**
>>> @@ -398,7 +377,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
>>>       dma_fence_get(&s_fence->finished);
>>>       drm_sched_fence_finished(s_fence, result);
>>>       dma_fence_put(&s_fence->finished);
>>> -    __drm_sched_run_free_queue(sched);
>>> +    drm_sched_run_free_queue(sched);
>>>   }
>>>   /**
>>> @@ -1134,12 +1113,16 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
>>>    * drm_sched_get_finished_job - fetch the next finished job to be destroyed
>>>    *
>>>    * @sched: scheduler instance
>>> + * @have_more: are there more finished jobs on the list
>>> + *
>>> + * Informs the caller through @have_more whether there are more finished jobs
>>> + * besides the returned one.
>>>    *
>>>    * Returns the next finished job from the pending list (if there is one)
>>>    * ready for it to be destroyed.
>>>    */
>>>   static struct drm_sched_job *
>>> -drm_sched_get_finished_job(struct drm_gpu_scheduler *sched)
>>> +drm_sched_get_finished_job(struct drm_gpu_scheduler *sched, bool *have_more)
>>>   {
>>>       struct drm_sched_job *job, *next;
>>> @@ -1147,22 +1130,25 @@ drm_sched_get_finished_job(struct drm_gpu_scheduler *sched)
>>>       job = list_first_entry_or_null(&sched->pending_list,
>>>                          struct drm_sched_job, list);
>>> -
>>>       if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
>>>           /* remove job from pending_list */
>>>           list_del_init(&job->list);
>>>           /* cancel this job's TO timer */
>>>           cancel_delayed_work(&sched->work_tdr);
>>> -        /* make the scheduled timestamp more accurate */
>>> +
>>> +        *have_more = false;
>>>           next = list_first_entry_or_null(&sched->pending_list,
>>>                           typeof(*next), list);
>>> -
>>>           if (next) {
>>> +            /* make the scheduled timestamp more accurate */
>>>               if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
>>>                        &next->s_fence->scheduled.flags))
>>>                   next->s_fence->scheduled.timestamp =
>>>                       dma_fence_timestamp(&job->s_fence->finished);
>>> +
>>> +            *have_more = dma_fence_is_signaled(&next->s_fence->finished);
>>> +
>>>               /* start TO timer for next job */
>>>               drm_sched_start_timeout(sched);
>>>           }
>>> @@ -1221,12 +1207,15 @@ static void drm_sched_free_job_work(struct work_struct *w)
>>>       struct drm_gpu_scheduler *sched =
>>>           container_of(w, struct drm_gpu_scheduler, work_free_job);
>>>       struct drm_sched_job *job;
>>> +    bool have_more;
>>> -    job = drm_sched_get_finished_job(sched);
>>> -    if (job)
>>> +    job = drm_sched_get_finished_job(sched, &have_more);
>>> +    if (job) {
>>>           sched->ops->free_job(job);
>>> +        if (have_more)
>>> +            drm_sched_run_free_queue(sched);
>>> +    }
>>> -    drm_sched_run_free_queue_unlocked(sched);
>>>       drm_sched_run_job_queue(sched);
>>>   }
>>
> 


