[PATCH v2] drm/sched: Avoid double re-lock on the job free path

Maíra Canal mcanal at igalia.com
Fri Jul 11 19:08:34 UTC 2025


Hi Tvrtko,

On 11/07/25 12:09, Tvrtko Ursulin wrote:
> Currently the job free work item will lock sched->job_list_lock a first
> time to see if there are any jobs, free a single job, and then lock again
> to decide whether to re-queue itself if there are more finished jobs.
> 
> Since drm_sched_get_finished_job() already looks at the second job in the
> queue we can simply add the signaled check and have it return the presence
> of more jobs to be freed to the caller. That way the work item does not
> have to lock the list again and repeat the signaled check.
> 
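For anyone skimming the thread, the free-job path after this patch boils
down to a single pass, roughly (condensed from the diff below, with the
surrounding work-item boilerplate trimmed):

	job = drm_sched_get_finished_job(sched, &have_more);
	if (job) {
		sched->ops->free_job(job);
		if (have_more)
			drm_sched_run_free_queue(sched);
	}

That is, whether a second already-signaled job exists is reported from
under the same job_list_lock acquisition, so the work item no longer has
to re-take the lock just to decide whether to re-queue itself.
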
> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at igalia.com>
> Cc: Christian König <christian.koenig at amd.com>
> Cc: Danilo Krummrich <dakr at kernel.org>
> Cc: Matthew Brost <matthew.brost at intel.com>
> Cc: Philipp Stanner <phasta at kernel.org>
> ---
> v2:
>   * Improve commit text and kerneldoc. (Philipp)
>   * Rename run free work helper. (Philipp)

Would it be possible not to rename it? Otherwise, I won't be able to
use the function name `drm_sched_run_free_queue()` in the
DRM_GPU_SCHED_STAT_NO_HANG series.

Not a big deal, but it would make it easier to reintroduce
`drm_sched_run_free_queue()` if that series lands after this patch.

Best Regards,
- Maíra

> ---
>   drivers/gpu/drm/scheduler/sched_main.c | 48 +++++++++++---------------
>   1 file changed, 21 insertions(+), 27 deletions(-)
> 
> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> index 33d02b79674d..e183b305a51b 100644
> --- a/drivers/gpu/drm/scheduler/sched_main.c
> +++ b/drivers/gpu/drm/scheduler/sched_main.c
> @@ -349,29 +349,13 @@ static void drm_sched_run_job_queue(struct drm_gpu_scheduler *sched)
>   }
>   
>   /**
> - * __drm_sched_run_free_queue - enqueue free-job work
> - * @sched: scheduler instance
> - */
> -static void __drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
> -{
> -	if (!READ_ONCE(sched->pause_submit))
> -		queue_work(sched->submit_wq, &sched->work_free_job);
> -}
> -
> -/**
> - * drm_sched_run_free_queue - enqueue free-job work if ready
> + * drm_sched_run_free_queue - enqueue free-job work
>    * @sched: scheduler instance
>    */
>   static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
>   {
> -	struct drm_sched_job *job;
> -
> -	spin_lock(&sched->job_list_lock);
> -	job = list_first_entry_or_null(&sched->pending_list,
> -				       struct drm_sched_job, list);
> -	if (job && dma_fence_is_signaled(&job->s_fence->finished))
> -		__drm_sched_run_free_queue(sched);
> -	spin_unlock(&sched->job_list_lock);
> +	if (!READ_ONCE(sched->pause_submit))
> +		queue_work(sched->submit_wq, &sched->work_free_job);
>   }
>   
>   /**
> @@ -393,7 +377,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
>   	dma_fence_get(&s_fence->finished);
>   	drm_sched_fence_finished(s_fence, result);
>   	dma_fence_put(&s_fence->finished);
> -	__drm_sched_run_free_queue(sched);
> +	drm_sched_run_free_queue(sched);
>   }
>   
>   /**
> @@ -1094,12 +1078,16 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
>    * drm_sched_get_finished_job - fetch the next finished job to be destroyed
>    *
>    * @sched: scheduler instance
> + * @have_more: are there more finished jobs on the list
> + *
> + * Informs the caller through @have_more whether there are more finished jobs
> + * besides the returned one.
>    *
>    * Returns the next finished job from the pending list (if there is one)
>    * ready for it to be destroyed.
>    */
>   static struct drm_sched_job *
> -drm_sched_get_finished_job(struct drm_gpu_scheduler *sched)
> +drm_sched_get_finished_job(struct drm_gpu_scheduler *sched, bool *have_more)
>   {
>   	struct drm_sched_job *job, *next;
>   
> @@ -1107,22 +1095,25 @@ drm_sched_get_finished_job(struct drm_gpu_scheduler *sched)
>   
>   	job = list_first_entry_or_null(&sched->pending_list,
>   				       struct drm_sched_job, list);
> -
>   	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
>   		/* remove job from pending_list */
>   		list_del_init(&job->list);
>   
>   		/* cancel this job's TO timer */
>   		cancel_delayed_work(&sched->work_tdr);
> -		/* make the scheduled timestamp more accurate */
> +
> +		*have_more = false;
>   		next = list_first_entry_or_null(&sched->pending_list,
>   						typeof(*next), list);
> -
>   		if (next) {
> +			/* make the scheduled timestamp more accurate */
>   			if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
>   				     &next->s_fence->scheduled.flags))
>   				next->s_fence->scheduled.timestamp =
>   					dma_fence_timestamp(&job->s_fence->finished);
> +
> +			*have_more = dma_fence_is_signaled(&next->s_fence->finished);
> +
>   			/* start TO timer for next job */
>   			drm_sched_start_timeout(sched);
>   		}
> @@ -1181,12 +1172,15 @@ static void drm_sched_free_job_work(struct work_struct *w)
>   	struct drm_gpu_scheduler *sched =
>   		container_of(w, struct drm_gpu_scheduler, work_free_job);
>   	struct drm_sched_job *job;
> +	bool have_more;
>   
> -	job = drm_sched_get_finished_job(sched);
> -	if (job)
> +	job = drm_sched_get_finished_job(sched, &have_more);
> +	if (job) {
>   		sched->ops->free_job(job);
> +		if (have_more)
> +			drm_sched_run_free_queue(sched);
> +	}
>   
> -	drm_sched_run_free_queue(sched);
>   	drm_sched_run_job_queue(sched);
>   }
>   


