[PATCH v3] drm/xe: Invalidate userptr VMA on page pin fault

Matthew Brost matthew.brost at intel.com
Tue Mar 12 18:36:23 UTC 2024


On Tue, Mar 12, 2024 at 08:08:04AM +0100, Thomas Hellström wrote:
> On Mon, 2024-03-11 at 20:11 +0000, Matthew Brost wrote:
> > On Mon, Mar 11, 2024 at 08:47:50PM +0100, Thomas Hellström wrote:
> > > On Mon, 2024-03-11 at 12:20 -0700, Matthew Brost wrote:
> > > > Rather than return an error to the user or ban the VM when userptr
> > > > VMA page pin fails with -EFAULT, invalidate VMA mappings. This
> > > > supports the UMD use case of freeing userptr while still having
> > > > bindings.
> > > > 
> > > > Now that non-faulting VMs can invalidate VMAs, drop the usm prefix
> > > > for the tile_invalidated member.
> > > > 
> > > > v2:
> > > >  - Fix build error (CI)
> > > > v3:
> > > >  - Don't invalidate VMA if in fault mode, rather kill VM (Thomas)
> > > >  - Update commit message with tile_invalidated name change (Thomas)
> > > >  - Wait on VM bookkeep slots with VM resv lock held (Thomas)
> > > > 
> > > > Signed-off-by: Matthew Brost <matthew.brost at intel.com>
> > > > ---
> > > >  drivers/gpu/drm/xe/xe_gt_pagefault.c |  4 ++--
> > > >  drivers/gpu/drm/xe/xe_trace.h        |  2 +-
> > > >  drivers/gpu/drm/xe/xe_vm.c           | 29 ++++++++++++++++++++--------
> > > >  drivers/gpu/drm/xe/xe_vm_types.h     |  7 ++-----
> > > >  4 files changed, 26 insertions(+), 16 deletions(-)
> > > > 
> > > > diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
> > > > index 73c535193a98..241c294270d9 100644
> > > > --- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
> > > > +++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
> > > > @@ -69,7 +69,7 @@ static bool access_is_atomic(enum access_type access_type)
> > > >  static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma)
> > > >  {
> > > >  	return BIT(tile->id) & vma->tile_present &&
> > > > -		!(BIT(tile->id) & vma->usm.tile_invalidated);
> > > > +		!(BIT(tile->id) & vma->tile_invalidated);
> > > >  }
> > > >  
> > > >  static bool vma_matches(struct xe_vma *vma, u64 page_addr)
> > > > @@ -226,7 +226,7 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
> > > >  
> > > >  	if (xe_vma_is_userptr(vma))
> > > >  		ret = xe_vma_userptr_check_repin(to_userptr_vma(vma));
> > > > -	vma->usm.tile_invalidated &= ~BIT(tile->id);
> > > > +	vma->tile_invalidated &= ~BIT(tile->id);
> > > >  
> > > >  unlock_dma_resv:
> > > >  	drm_exec_fini(&exec);
> > > > diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
> > > > index 4ddc55527f9a..846f14507d5f 100644
> > > > --- a/drivers/gpu/drm/xe/xe_trace.h
> > > > +++ b/drivers/gpu/drm/xe/xe_trace.h
> > > > @@ -468,7 +468,7 @@ DEFINE_EVENT(xe_vma, xe_vma_userptr_invalidate,
> > > >  	     TP_ARGS(vma)
> > > >  );
> > > >  
> > > > -DEFINE_EVENT(xe_vma, xe_vma_usm_invalidate,
> > > > +DEFINE_EVENT(xe_vma, xe_vma_invalidate,
> > > >  	     TP_PROTO(struct xe_vma *vma),
> > > >  	     TP_ARGS(vma)
> > > >  );
> > > > diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> > > > index 643b3701a738..19323b411cb2 100644
> > > > --- a/drivers/gpu/drm/xe/xe_vm.c
> > > > +++ b/drivers/gpu/drm/xe/xe_vm.c
> > > > @@ -724,11 +724,25 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
> > > >  	list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
> > > >  				 userptr.repin_link) {
> > > >  		err = xe_vma_userptr_pin_pages(uvma);
> > > > -		if (err < 0)
> > > > -			return err;
> > > > -
> > > >  		list_del_init(&uvma->userptr.repin_link);
> > > > -		list_move_tail(&uvma->vma.combined_links.rebind, &vm->rebind_list);
> > > > +		if (err == -EFAULT && !xe_vm_in_fault_mode(vm)) {
> > 
> > Just remembered this now. This function is not called in fault mode
> > either. It is called by the exec IOCTL (dma-fence mode) or preempt
> > rebind worker.
> > 
> > I'll delete this part and add an assert at the top of the function.
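> > 
> > Something like this (a sketch; assuming vm->xe is the right way to get
> > at the device pointer here):
> > 
> > 	/* Only called from the exec IOCTL / preempt rebind worker */
> > 	xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));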
> > 
> > > > +			/* Wait for pending binds */
> > > > +			xe_vm_lock(vm, false);
> > > > +			dma_resv_wait_timeout(xe_vm_resv(vm),
> > > > +					      DMA_RESV_USAGE_BOOKKEEP,
> > > > +					      false, MAX_SCHEDULE_TIMEOUT);
> > > > +
> > > > +			err = xe_vm_invalidate_vma(&uvma->vma);
> > > 
> > > This can still race with yet another notifier, I think.
> > > 
> > 
> > Wouldn't another invalidate just add it back to the
> > &vm->userptr.invalidated list and trigger xe_vm_userptr_pin again?
> > That seems fine to me.
> 
> What I'm more afraid of is that we modify state without the relevant
> locks held, although that might be restricted to the tile_invalidated
> member. But most probably the invalidation can race against itself as
> well, so we might want to look at handling that separately.
>

Still not seeing a race.

This code path is mutually exclusive with any other code path doing
binds, as vm->lock is held in write mode. We modify the tile_invalidated
field here.

The notifier can run in parallel, but it only reads the tile_present
field and adds the invalidated userptr to the &vm->userptr.invalidated
list. That list has its own locking.

Again, all of this seems safe to me.
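
To spell out my reading of the concurrency (a sketch, not verbatim
driver code; lock and member names are from xe_vm_types.h as I remember
them):

	/*
	 * xe_vm_userptr_pin()               userptr notifier
	 * -------------------               ----------------
	 * vm->lock held in write mode       reads vma->tile_present
	 * writes vma->tile_invalidated      spin_lock(&vm->userptr.invalidated_lock)
	 * moves uvma to &vm->rebind_list    adds uvma to &vm->userptr.invalidated
	 */

If the notifier fires again after our invalidate, the userptr just lands
back on &vm->userptr.invalidated and we come through xe_vm_userptr_pin()
once more.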
 
> > 
> > The lockdep checks in xe_vm_invalidate_vma would be unhappy though (I
> > think)?
> 
> I think lockdep should be happy, actually. It only checks whether the
> seqno is invalid and at this point it should be AFAICT, since we
> haven't updated it yet.
> 
> > Maybe we call a version of xe_vm_invalidate_vma here where the
> > lockdep
> > checks are skipped?
> > 
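For reference, the check in question is the userptr branch of the
CONFIG_PROVE_LOCKING block in xe_vm_invalidate_vma(), roughly this
(paraphrasing from memory, not a verbatim copy):

	/* Check that we don't race with page-table updates */
	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
		if (xe_vma_is_userptr(vma))
			WARN_ON_ONCE(!mmu_interval_check_retry
				     (&to_userptr_vma(vma)->userptr.notifier,
				      to_userptr_vma(vma)->userptr.notifier_seq));
	}

Since the failed pin never refreshed userptr.notifier_seq,
mmu_interval_check_retry() still reports that a retry is needed, so the
WARN stays quiet. Agreed, this should be fine as-is.
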
> > > > +			xe_vm_unlock(vm);
> > > > +			if (err)
> > > > +				return err;
> > > > +		} else {
> > > > +			if (err < 0)
> > > 
> > > And here we should've kept the userptr on the repin list.
> > > 
> > 
> > Yes, this is a change in behavior, but I think it's fine as we try to
> > kill the VM at this point, right? I can change it back if you feel
> > strongly.
> 
> If it's an exec and we hit an -ENOMEM, chances are the user wants to
> retry after freeing up memory. Not sure if we can see -ERESTARTSYS or
> -EINTR at this point, but we probably will once we make the
> mmu_interval_read_begin() wait interruptible.
> 

Right, will fix.
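
For v4 the loop will look something like this (sketch, untested; with
the fault-mode check dropped per above, and transient errors such as
-ENOMEM or -EINTR now leaving the userptr on the repin list for a
retry):

	list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
				 userptr.repin_link) {
		err = xe_vma_userptr_pin_pages(uvma);
		if (err == -EFAULT) {
			list_del_init(&uvma->userptr.repin_link);

			/* Wait for pending binds */
			xe_vm_lock(vm, false);
			dma_resv_wait_timeout(xe_vm_resv(vm),
					      DMA_RESV_USAGE_BOOKKEEP,
					      false, MAX_SCHEDULE_TIMEOUT);

			err = xe_vm_invalidate_vma(&uvma->vma);
			xe_vm_unlock(vm);
			if (err)
				return err;
		} else {
			if (err < 0)
				return err;	/* still on repin_list, retryable */

			list_del_init(&uvma->userptr.repin_link);
			list_move_tail(&uvma->vma.combined_links.rebind,
				       &vm->rebind_list);
		}
	}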

Matt

> /Thomas
> 
> 
> >  
> > Matt
> > 
> > > 
> > > /Thomas
> > > 
> > > 
> > > > +				return err;
> > > > +
> > > 
> > > 
> > > > +			list_move_tail(&uvma->vma.combined_links.rebind,
> > > > +				       &vm->rebind_list);
> > > > +		}
> > > >  	}
> > > >  
> > > >  	return 0;
> > > > @@ -2024,7 +2038,7 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
> > > >  			return err;
> > > >  	}
> > > >  
> > > > -	if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) {
> > > > +	if (vma->tile_mask != (vma->tile_present & ~vma->tile_invalidated)) {
> > > >  		return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
> > > >  				  true, first_op, last_op);
> > > >  	} else {
> > > > @@ -3214,9 +3228,8 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
> > > >  	u8 id;
> > > >  	int ret;
> > > >  
> > > > -	xe_assert(xe, xe_vm_in_fault_mode(xe_vma_vm(vma)));
> > > >  	xe_assert(xe, !xe_vma_is_null(vma));
> > > > -	trace_xe_vma_usm_invalidate(vma);
> > > > +	trace_xe_vma_invalidate(vma);
> > > >  
> > > >  	/* Check that we don't race with page-table updates */
> > > >  	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
> > > > @@ -3254,7 +3267,7 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
> > > >  		}
> > > >  	}
> > > >  
> > > > -	vma->usm.tile_invalidated = vma->tile_mask;
> > > > +	vma->tile_invalidated = vma->tile_mask;
> > > >  
> > > >  	return 0;
> > > >  }
> > > > diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
> > > > index 79b5cab57711..ae5fb565f6bf 100644
> > > > --- a/drivers/gpu/drm/xe/xe_vm_types.h
> > > > +++ b/drivers/gpu/drm/xe/xe_vm_types.h
> > > > @@ -84,11 +84,8 @@ struct xe_vma {
> > > >  		struct work_struct destroy_work;
> > > >  	};
> > > >  
> > > > -	/** @usm: unified shared memory state */
> > > > -	struct {
> > > > -		/** @tile_invalidated: VMA has been invalidated */
> > > > -		u8 tile_invalidated;
> > > > -	} usm;
> > > > +	/** @tile_invalidated: VMA has been invalidated */
> > > > +	u8 tile_invalidated;
> > > >  
> > > >  	/** @tile_mask: Tile mask of where to create binding for this VMA */
> > > >  	u8 tile_mask;
> > > 
> 

