[PATCH] drm/xe: Take ref to job's fence in arm

Matthew Brost <matthew.brost at intel.com>
Tue Oct 22 17:58:23 UTC 2024


On Tue, Oct 22, 2024 at 09:39:54AM +0100, Matthew Auld wrote:
> On 21/10/2024 18:35, Matthew Brost wrote:
> > Take ref to job's fence in arm rather than run job. This ref is owned by
> > the drm scheduler so it makes sense to take the ref before handing over
> > the job to the scheduler. Also removes an atomic from the run job path.
> > 
> > Suggested-by: Matthew Auld <matthew.auld at intel.com>
> > Signed-off-by: Matthew Brost <matthew.brost at intel.com>
> > ---
> >   drivers/gpu/drm/xe/xe_execlist.c        | 2 +-
> >   drivers/gpu/drm/xe/xe_guc_submit.c      | 9 +++++----
> >   drivers/gpu/drm/xe/xe_sched_job.c       | 2 +-
> >   drivers/gpu/drm/xe/xe_sched_job_types.h | 1 -
> >   4 files changed, 7 insertions(+), 7 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
> > index f3b71fe7a96d..a8c416a48812 100644
> > --- a/drivers/gpu/drm/xe/xe_execlist.c
> > +++ b/drivers/gpu/drm/xe/xe_execlist.c
> > @@ -313,7 +313,7 @@ execlist_run_job(struct drm_sched_job *drm_job)
> >   	q->ring_ops->emit_job(job);
> >   	xe_execlist_make_active(exl);
> > -	return dma_fence_get(job->fence);
> > +	return job->fence;
> >   }
> >   static void execlist_job_free(struct drm_sched_job *drm_job)
> > diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
> > index 0b81972ff651..25f51a947c3a 100644
> > --- a/drivers/gpu/drm/xe/xe_guc_submit.c
> > +++ b/drivers/gpu/drm/xe/xe_guc_submit.c
> > @@ -717,6 +717,7 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job)
> >   	struct xe_exec_queue *q = job->q;
> >   	struct xe_guc *guc = exec_queue_to_guc(q);
> >   	struct xe_device *xe = guc_to_xe(guc);
> > +	struct dma_fence *fence = NULL;
> >   	bool lr = xe_exec_queue_is_lr(q);
> >   	xe_assert(xe, !(exec_queue_destroyed(q) || exec_queue_pending_disable(q)) ||
> > @@ -734,12 +735,12 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job)
> >   	if (lr) {
> >   		xe_sched_job_set_error(job, -EOPNOTSUPP);
> > -		return NULL;
> > -	} else if (test_and_set_bit(JOB_FLAG_SUBMIT, &job->fence->flags)) {
> > -		return job->fence;
> > +		dma_fence_put(job->fence);	/* Drop ref from xe_sched_job_arm */
> 
> Just to confirm, with lr the run_job here is not going to be run more than
> once?
> 

LR mode - at most once, as we return NULL from run_job so the job is
immediately signaled / removed from the pending job list.

Non-LR mode - after a GT reset it is possible for run_job to be called a
second time on a job. In that case there is no 'dma_fence_put' in the
resubmit path, so only a single put ever pairs with the single
'dma_fence_get' taken in arm and the reference count stays balanced.

So I think this patch is correct.
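To make the refcount flow concrete, here is a toy userspace model of what
the patch sets up (an editorial sketch only, not kernel code; fence_get,
fence_put, job_arm and job_run are illustrative stand-ins, not the real
xe/drm helpers): arm takes the one reference the scheduler will own,
run_job hands the fence back without another get, and a single put on the
scheduler side balances it.

	/* toy model of the get-in-arm / put-by-scheduler pairing */
	#include <assert.h>
	#include <stdio.h>

	struct fence {
		int refcount;
	};

	struct job {
		struct fence *fence;
	};

	static struct fence *fence_get(struct fence *f)
	{
		f->refcount++;
		return f;
	}

	static void fence_put(struct fence *f)
	{
		assert(f->refcount > 0);
		f->refcount--;
	}

	/* models xe_sched_job_arm(): takes the ref the scheduler will own */
	static void job_arm(struct job *job, struct fence *f)
	{
		job->fence = fence_get(f);
	}

	/* models run_job(): returns the fence without another get */
	static struct fence *job_run(struct job *job)
	{
		return job->fence;
	}

	int main(void)
	{
		struct fence f = { .refcount = 1 };	/* creator's reference */
		struct job job;

		job_arm(&job, &f);			/* refcount == 2 */
		fence_put(job_run(&job));		/* scheduler consumes arm's ref */
		fence_put(&f);				/* creator drops its own ref */

		assert(f.refcount == 0);
		printf("refcount balanced\n");
		return 0;
	}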

Matt

> >   	} else {
> > -		return dma_fence_get(job->fence);
> > +		fence = job->fence;
> >   	}
> > +
> > +	return fence;
> >   }
> >   static void guc_exec_queue_free_job(struct drm_sched_job *drm_job)
> > diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
> > index eeccc1c318ae..1905ca590965 100644
> > --- a/drivers/gpu/drm/xe/xe_sched_job.c
> > +++ b/drivers/gpu/drm/xe/xe_sched_job.c
> > @@ -280,7 +280,7 @@ void xe_sched_job_arm(struct xe_sched_job *job)
> >   		fence = &chain->base;
> >   	}
> > -	job->fence = fence;
> > +	job->fence = dma_fence_get(fence);	/* Pairs with put in scheduler */
> >   	drm_sched_job_arm(&job->drm);
> >   }
> > diff --git a/drivers/gpu/drm/xe/xe_sched_job_types.h b/drivers/gpu/drm/xe/xe_sched_job_types.h
> > index 0d3f76fb05ce..8ed95e1a378f 100644
> > --- a/drivers/gpu/drm/xe/xe_sched_job_types.h
> > +++ b/drivers/gpu/drm/xe/xe_sched_job_types.h
> > @@ -40,7 +40,6 @@ struct xe_sched_job {
> >   	 * @fence: dma fence to indicate completion. 1 way relationship - job
> >   	 * can safely reference fence, fence cannot safely reference job.
> >   	 */
> > -#define JOB_FLAG_SUBMIT		DMA_FENCE_FLAG_USER_BITS
> >   	struct dma_fence *fence;
> >   	/** @user_fence: write back value when BB is complete */
> >   	struct {

