[PATCH 10/13] drm/xe: Move ufence check to op_lock

Matthew Brost matthew.brost at intel.com
Fri Apr 19 19:34:22 UTC 2024


On Fri, Apr 19, 2024 at 08:56:57AM -0600, Zeng, Oak wrote:
> There is a typo in the commit message. Other than that, the patch looks good.
> 

Noticed that after sending it. Will fix in next rev.

Matt

> Reviewed-by: Oak Zeng <oak.zeng at intel.com>
> 
> > -----Original Message-----
> > From: Intel-xe <intel-xe-bounces at lists.freedesktop.org> On Behalf Of
> > Matthew Brost
> > Sent: Wednesday, April 10, 2024 1:41 AM
> > To: intel-xe at lists.freedesktop.org
> > Cc: Brost, Matthew <matthew.brost at intel.com>
> > Subject: [PATCH 10/13] drm/xe: Move ufence check to op_lock
> > 
> > Rather than checking for an unsignaled ufence ay
> 
> At
> 
> Oak
> 
> 
>  unbind time, check for
> > this during the op_lock function. This will help with the transition to
> > job 1 per VM bind IOCTL.
> > 
> > v2:
> >  - Rebase
> > 
> > Signed-off-by: Matthew Brost <matthew.brost at intel.com>
> > ---
> >  drivers/gpu/drm/xe/xe_vm.c | 33 +++++++++++++++++++++++----------
> >  1 file changed, 23 insertions(+), 10 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> > index 97384c77f662..0319e70577fe 100644
> > --- a/drivers/gpu/drm/xe/xe_vm.c
> > +++ b/drivers/gpu/drm/xe/xe_vm.c
> > @@ -1755,16 +1755,6 @@ xe_vm_unbind_vma(struct xe_vma *vma, struct
> > xe_exec_queue *q,
> > 
> >  	trace_xe_vma_unbind(vma);
> > 
> > -	if (vma->ufence) {
> > -		struct xe_user_fence * const f = vma->ufence;
> > -
> > -		if (!xe_sync_ufence_get_status(f))
> > -			return ERR_PTR(-EBUSY);
> > -
> > -		vma->ufence = NULL;
> > -		xe_sync_ufence_put(f);
> > -	}
> > -
> >  	if (number_tiles > 1) {
> >  		fences = kmalloc_array(number_tiles, sizeof(*fences),
> >  				       GFP_KERNEL);
> > @@ -2819,6 +2809,21 @@ static int vma_lock_and_validate(struct drm_exec
> > *exec, struct xe_vma *vma,
> >  	return err;
> >  }
> > 
> > +static int check_ufence(struct xe_vma *vma)
> > +{
> > +	if (vma->ufence) {
> > +		struct xe_user_fence * const f = vma->ufence;
> > +
> > +		if (!xe_sync_ufence_get_status(f))
> > +			return -EBUSY;
> > +
> > +		vma->ufence = NULL;
> > +		xe_sync_ufence_put(f);
> > +	}
> > +
> > +	return 0;
> > +}
> > +
> >  static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
> >  			    struct xe_vma_op *op)
> >  {
> > @@ -2830,6 +2835,10 @@ static int op_lock_and_prep(struct drm_exec
> > *exec, struct xe_vm *vm,
> >  					    !xe_vm_in_fault_mode(vm));
> >  		break;
> >  	case DRM_GPUVA_OP_REMAP:
> > +		err = check_ufence(gpuva_to_vma(op->base.remap.unmap-
> > >va));
> > +		if (err)
> > +			break;
> > +
> >  		err = vma_lock_and_validate(exec,
> >  					    gpuva_to_vma(op-
> > >base.remap.unmap->va),
> >  					    false);
> > @@ -2839,6 +2848,10 @@ static int op_lock_and_prep(struct drm_exec
> > *exec, struct xe_vm *vm,
> >  			err = vma_lock_and_validate(exec, op->remap.next,
> > true);
> >  		break;
> >  	case DRM_GPUVA_OP_UNMAP:
> > +		err = check_ufence(gpuva_to_vma(op->base.unmap.va));
> > +		if (err)
> > +			break;
> > +
> >  		err = vma_lock_and_validate(exec,
> >  					    gpuva_to_vma(op-
> > >base.unmap.va),
> >  					    false);
> > --
> > 2.34.1
> 


More information about the Intel-xe mailing list