[RFC v4 04/16] drm/sched: Avoid double re-lock on the job free path

Philipp Stanner phasta at mailbox.org
Mon May 12 12:49:55 UTC 2025


On Fri, 2025-04-25 at 11:20 +0100, Tvrtko Ursulin wrote:
> Currently the job free work item will lock sched->job_list_lock a first
> time to see if there are any jobs, free a single job, and then lock again
> to decide whether to re-queue itself if there are more finished jobs.
> 
> Since drm_sched_get_finished_job() already looks at the second job in the
> queue we can simply add the signaled check and have it return the presence
> of more jobs to free to the caller. That way the work item does not have
> to lock the list again and repeat the signaled check.
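
For reference, the resulting control flow in condensed form. This is a
paraphrase assembled from the hunks quoted below, not the literal diff.

Before (two lock/unlock cycles per freed job, the second one hidden in
drm_sched_run_free_queue()):

static void drm_sched_free_job_work(struct work_struct *w)
{
	struct drm_gpu_scheduler *sched =
		container_of(w, struct drm_gpu_scheduler, work_free_job);
	struct drm_sched_job *job;

	job = drm_sched_get_finished_job(sched);	/* takes job_list_lock */
	if (job)
		sched->ops->free_job(job);

	drm_sched_run_free_queue(sched);		/* takes job_list_lock again */
	drm_sched_run_job_queue(sched);
}

After (one lock/unlock cycle; the helper checks the next job while it
already holds the lock and reports back through *have_more):

static void drm_sched_free_job_work(struct work_struct *w)
{
	struct drm_gpu_scheduler *sched =
		container_of(w, struct drm_gpu_scheduler, work_free_job);
	struct drm_sched_job *job;
	bool have_more;

	job = drm_sched_get_finished_job(sched, &have_more);
	if (job) {
		sched->ops->free_job(job);
		if (have_more)
			__drm_sched_run_free_queue(sched);
	}

	drm_sched_run_job_queue(sched);
}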

Are you convinced that this is worth it?

I'm torn. It's rare that one returns a status through a boolean by
reference.
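
One hypothetical alternative, sketched here only to illustrate the point
(untested, and it elides the timestamp and timeout-timer handling of the
real function; it would also re-queue the work before free_job() runs
rather than after): the helper could queue the free work itself while it
still holds job_list_lock, much like the removed
drm_sched_run_free_queue() did, instead of reporting back through a bool
pointer:

static struct drm_sched_job *
drm_sched_get_finished_job(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *job, *next;

	spin_lock(&sched->job_list_lock);

	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);
	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
		/* remove job from pending_list */
		list_del_init(&job->list);

		/* cancel this job's TO timer */
		cancel_delayed_work(&sched->work_tdr);

		next = list_first_entry_or_null(&sched->pending_list,
						typeof(*next), list);
		if (next && test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &next->s_fence->finished.flags))
			/* re-queue ourselves instead of telling the caller */
			__drm_sched_run_free_queue(sched);
	} else {
		job = NULL;
	}

	spin_unlock(&sched->job_list_lock);

	return job;
}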


Independently of that, this is a candidate which can certainly be
branched out from this series, to keep the series completely about the
new scheduling policy rather than other general improvements.


P.

> 
> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at igalia.com>
> Cc: Christian König <christian.koenig at amd.com>
> Cc: Danilo Krummrich <dakr at kernel.org>
> Cc: Matthew Brost <matthew.brost at intel.com>
> Cc: Philipp Stanner <phasta at kernel.org>
> ---
>  drivers/gpu/drm/scheduler/sched_main.c | 39 ++++++++++++++++-----------------------
>  1 file changed, 16 insertions(+), 23 deletions(-)
> 
> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> index 86e40157b09b..a45b02fd2af3 100644
> --- a/drivers/gpu/drm/scheduler/sched_main.c
> +++ b/drivers/gpu/drm/scheduler/sched_main.c
> @@ -365,22 +365,6 @@ static void __drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
>  		queue_work(sched->submit_wq, &sched->work_free_job);
>  }
>  
> -/**
> - * drm_sched_run_free_queue - enqueue free-job work if ready
> - * @sched: scheduler instance
> - */
> -static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
> -{
> -	struct drm_sched_job *job;
> -
> -	spin_lock(&sched->job_list_lock);
> -	job = list_first_entry_or_null(&sched->pending_list,
> -				       struct drm_sched_job, list);
> -	if (job && dma_fence_is_signaled(&job->s_fence->finished))
> -		__drm_sched_run_free_queue(sched);
> -	spin_unlock(&sched->job_list_lock);
> -}
> -
>  /**
>   * drm_sched_job_done - complete a job
>   * @s_job: pointer to the job which is done
> @@ -1097,12 +1081,13 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
>   * drm_sched_get_finished_job - fetch the next finished job to be destroyed
>   *
>   * @sched: scheduler instance
> + * @have_more: are there more finished jobs on the list
>   *
>   * Returns the next finished job from the pending list (if there is one)
>   * ready for it to be destroyed.
>   */
>  static struct drm_sched_job *
> -drm_sched_get_finished_job(struct drm_gpu_scheduler *sched)
> +drm_sched_get_finished_job(struct drm_gpu_scheduler *sched, bool *have_more)
>  {
>  	struct drm_sched_job *job, *next;
>  
> @@ -1110,22 +1095,27 @@ drm_sched_get_finished_job(struct drm_gpu_scheduler *sched)
>  
>  	job = list_first_entry_or_null(&sched->pending_list,
>  				       struct drm_sched_job, list);
> -
>  	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
>  		/* remove job from pending_list */
>  		list_del_init(&job->list);
>  
>  		/* cancel this job's TO timer */
>  		cancel_delayed_work(&sched->work_tdr);
> -		/* make the scheduled timestamp more accurate */
> +
> +		*have_more = false;
>  		next = list_first_entry_or_null(&sched->pending_list,
>  						typeof(*next), list);
> -
>  		if (next) {
> +			/* make the scheduled timestamp more accurate */
>  			if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
>  				     &next->s_fence->scheduled.flags))
>  				next->s_fence->scheduled.timestamp =
>  					dma_fence_timestamp(&job->s_fence->finished);
> +
> +			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
> +				     &next->s_fence->finished.flags))
> +				*have_more = true;
> +
>  			/* start TO timer for next job */
>  			drm_sched_start_timeout(sched);
>  		}
> @@ -1184,12 +1174,15 @@ static void drm_sched_free_job_work(struct work_struct *w)
>  	struct drm_gpu_scheduler *sched =
>  		container_of(w, struct drm_gpu_scheduler, work_free_job);
>  	struct drm_sched_job *job;
> +	bool have_more;
>  
> -	job = drm_sched_get_finished_job(sched);
> -	if (job)
> +	job = drm_sched_get_finished_job(sched, &have_more);
> +	if (job) {
>  		sched->ops->free_job(job);
> +		if (have_more)
> +			__drm_sched_run_free_queue(sched);
> +	}
>  
> -	drm_sched_run_free_queue(sched);
>  	drm_sched_run_job_queue(sched);
>  }
>  


