[PATCH 5/5] drm/xe: Use hmm_range_fault to populate user pages

Zeng, Oak oak.zeng at intel.com
Tue Mar 19 02:36:53 UTC 2024


Hi Matt,

> -----Original Message-----
> From: Brost, Matthew <matthew.brost at intel.com>
> Sent: Thursday, March 14, 2024 4:55 PM
> To: Zeng, Oak <oak.zeng at intel.com>
> Cc: intel-xe at lists.freedesktop.org; Hellstrom, Thomas
> <thomas.hellstrom at intel.com>; airlied at gmail.com; Welty, Brian
> <brian.welty at intel.com>; Ghimiray, Himal Prasad
> <himal.prasad.ghimiray at intel.com>
> Subject: Re: [PATCH 5/5] drm/xe: Use hmm_range_fault to populate user pages
> 
> On Wed, Mar 13, 2024 at 11:35:53PM -0400, Oak Zeng wrote:
> > This is an effort to unify hmmptr (aka system allocator)
> > and userptr code. hmm_range_fault is used to populate
> > a virtual address range for both hmmptr and userptr,
> > instead of hmmptr using hmm_range_fault and userptr
> > using get_user_pages_fast.
> >
> > This also aligns with AMD gpu driver's behavior. In
> > long term, we plan to put some common helpers in this
> > area to drm layer so it can be re-used by different
> > vendors.
> >
> > Signed-off-by: Oak Zeng <oak.zeng at intel.com>
> > ---
> >  drivers/gpu/drm/xe/xe_vm.c | 105 ++-----------------------------------
> >  1 file changed, 4 insertions(+), 101 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> > index db3f049a47dc..d6088dcac74a 100644
> > --- a/drivers/gpu/drm/xe/xe_vm.c
> > +++ b/drivers/gpu/drm/xe/xe_vm.c
> > @@ -38,6 +38,7 @@
> >  #include "xe_sync.h"
> >  #include "xe_trace.h"
> >  #include "xe_wa.h"
> > +#include "xe_hmm.h"
> >
> >  static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
> >  {
> > @@ -65,113 +66,15 @@ int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
> >
> >  int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
> 
> See my comments in the previous patch about layer, those comments are
> valid here too.
> 
> >  {
> > -	struct xe_userptr *userptr = &uvma->userptr;
> >  	struct xe_vma *vma = &uvma->vma;
> >  	struct xe_vm *vm = xe_vma_vm(vma);
> >  	struct xe_device *xe = vm->xe;
> > -	const unsigned long num_pages = xe_vma_size(vma) >> PAGE_SHIFT;
> > -	struct page **pages;
> > -	bool in_kthread = !current->mm;
> > -	unsigned long notifier_seq;
> > -	int pinned, ret, i;
> > -	bool read_only = xe_vma_read_only(vma);
> > +	bool write = !xe_vma_read_only(vma);
> > +	struct hmm_range hmm_range;
> >
> >  	lockdep_assert_held(&vm->lock);
> >  	xe_assert(xe, xe_vma_is_userptr(vma));
> > -retry:
> > -	if (vma->gpuva.flags & XE_VMA_DESTROYED)
> > -		return 0;
> 
> ^^^
> This should not be dropped. Both the vma->gpuva.flags & XE_VMA_DESTROYED
> and userptr invalidation check retry loop should still be in here.

I will move this check (and the notifier sequence check) into hmm.c.
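Roughly, the top of the new populate path in hmm.c will keep those early-outs; a minimal sketch (the helper name and argument list below are just placeholders mirroring the userptr code being removed, not the final v1 code):

static bool xe_hmm_userptr_needs_repin(struct xe_userptr_vma *uvma,
				       unsigned long *notifier_seq)
{
	struct xe_userptr *userptr = &uvma->userptr;
	struct xe_vma *vma = &uvma->vma;

	/* The vma is being torn down, nothing to populate */
	if (vma->gpuva.flags & XE_VMA_DESTROYED)
		return false;

	/* Pages are still valid if nothing invalidated the range */
	*notifier_seq = mmu_interval_read_begin(&userptr->notifier);
	return *notifier_seq != userptr->notifier_seq;
}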
> 
> > -
> > -	notifier_seq = mmu_interval_read_begin(&userptr->notifier);
> > -	if (notifier_seq == userptr->notifier_seq)
> > -		return 0;
> > -
> > -	pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
> > -	if (!pages)
> > -		return -ENOMEM;
> > -
> > -	if (userptr->sg) {
> > -		dma_unmap_sgtable(xe->drm.dev,
> > -				  userptr->sg,
> > -				  read_only ? DMA_TO_DEVICE :
> > -				  DMA_BIDIRECTIONAL, 0);
> > -		sg_free_table(userptr->sg);
> > -		userptr->sg = NULL;
> > -	}
> 
> ^^^
> Likewise, I don't think this should be dropped either.

Will move this to hmm.c.
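The sg teardown that moves over is basically the existing block wrapped in a helper; a sketch (again, the name is only a placeholder):

static void xe_hmm_userptr_free_sg(struct xe_device *xe,
				   struct xe_userptr *userptr,
				   bool read_only)
{
	if (!userptr->sg)
		return;

	/* Drop the previous DMA mapping and sg table before repopulating */
	dma_unmap_sgtable(xe->drm.dev, userptr->sg,
			  read_only ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL, 0);
	sg_free_table(userptr->sg);
	userptr->sg = NULL;
}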

> 
> > -
> > -	pinned = ret = 0;
> > -	if (in_kthread) {
> > -		if (!mmget_not_zero(userptr->notifier.mm)) {
> > -			ret = -EFAULT;
> > -			goto mm_closed;
> > -		}
> > -		kthread_use_mm(userptr->notifier.mm);
> > -	}
> 
> ^^^
> Nor this.

Will move this to hmm.c as well.
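The mm grab/release will bracket the hmm_range_fault() call the same way it brackets get_user_pages_fast() today; one way to do it is a small pair of helpers, e.g. (sketch only, names are placeholders; needs linux/sched/mm.h and linux/kthread.h):

/* Take an mm reference when we are in a kthread (no current->mm) */
static int xe_hmm_use_mm(struct mm_struct *mm, bool *in_kthread)
{
	*in_kthread = !current->mm;

	if (*in_kthread) {
		if (!mmget_not_zero(mm))
			return -EFAULT;
		kthread_use_mm(mm);
	}

	return 0;
}

static void xe_hmm_unuse_mm(struct mm_struct *mm, bool in_kthread)
{
	if (in_kthread) {
		kthread_unuse_mm(mm);
		mmput(mm);
	}
}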


> 
> > -
> > -	while (pinned < num_pages) {
> > -		ret = get_user_pages_fast(xe_vma_userptr(vma) +
> > -					  pinned * PAGE_SIZE,
> > -					  num_pages - pinned,
> > -					  read_only ? 0 : FOLL_WRITE,
> > -					  &pages[pinned]);
> > -		if (ret < 0)
> > -			break;
> > -
> > -		pinned += ret;
> > -		ret = 0;
> > -	}
> 
> ^^^
> We should be replacing this.
> 
> > -
> > -	if (in_kthread) {
> > -		kthread_unuse_mm(userptr->notifier.mm);
> > -		mmput(userptr->notifier.mm);
> > -	}
> > -mm_closed:
> > -	if (ret)
> > -		goto out;
> > -
> > -	ret = sg_alloc_table_from_pages_segment(&userptr->sgt, pages,
> > -						pinned, 0,
> > -						(u64)pinned << PAGE_SHIFT,
> > -						xe_sg_segment_size(xe->drm.dev),
> > -						GFP_KERNEL);
> > -	if (ret) {
> > -		userptr->sg = NULL;
> > -		goto out;
> > -	}
> > -	userptr->sg = &userptr->sgt;
> > -
> > -	ret = dma_map_sgtable(xe->drm.dev, userptr->sg,
> > -			      read_only ? DMA_TO_DEVICE :
> > -			      DMA_BIDIRECTIONAL,
> > -			      DMA_ATTR_SKIP_CPU_SYNC |
> > -			      DMA_ATTR_NO_KERNEL_MAPPING);
> > -	if (ret) {
> > -		sg_free_table(userptr->sg);
> > -		userptr->sg = NULL;
> > -		goto out;
> > -	}
> > -
> > -	for (i = 0; i < pinned; ++i) {
> > -		if (!read_only) {
> > -			lock_page(pages[i]);
> > -			set_page_dirty(pages[i]);
> > -			unlock_page(pages[i]);
> > -		}
> > -
> > -		mark_page_accessed(pages[i]);
> > -	}
> > -
> > -out:
> > -	release_pages(pages, pinned);
> > -	kvfree(pages);
> 
> ^^^
> Through here (minus exiting the kthread) with hmm call. I guess the
> kthread enter / exit could be in the hmm layer too.


I have moved the missing parts to hmm.c and will send out v1.
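For reference, the core of the populate helper will look roughly like the below (untested sketch, so names and error handling may differ in v1; pfns is assumed to be a kvmalloc'ed array of num_pages unsigned longs and mm is userptr->notifier.mm):

struct hmm_range range = {
	.notifier	= &userptr->notifier,
	.start		= xe_vma_userptr(vma),
	.end		= xe_vma_userptr(vma) + xe_vma_size(vma),
	.hmm_pfns	= pfns,
	.default_flags	= HMM_PFN_REQ_FAULT |
			  (write ? HMM_PFN_REQ_WRITE : 0),
};
int ret;

do {
	range.notifier_seq = mmu_interval_read_begin(&userptr->notifier);

	mmap_read_lock(mm);
	ret = hmm_range_fault(&range);
	mmap_read_unlock(mm);

	/*
	 * -EBUSY means the range was invalidated while faulting; real
	 * code would bound this retry with a timeout.
	 */
} while (ret == -EBUSY);

if (ret) {
	kvfree(pfns);
	return ret;
}

/*
 * hmm_range_fault() returns pfns, not pages: convert each entry with
 * hmm_pfn_to_page() before building the sg table and dma-mapping it
 * as before, then store range.notifier_seq in userptr->notifier_seq.
 */

The simplified xe_vma_userptr_pin_pages() in this patch then stays a thin wrapper around that helper.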

Thanks a lot for the review, Matt!

Oak
> 
> Matt
> 
> > -
> > -	if (!(ret < 0)) {
> > -		userptr->notifier_seq = notifier_seq;
> > -		if (xe_vma_userptr_check_repin(uvma) == -EAGAIN)
> > -			goto retry;
> > -	}
> > -
> > -	return ret < 0 ? ret : 0;
> > +	return xe_hmm_populate_range(vma, &hmm_range, write);
> >  }
> >
> >  static bool preempt_fences_waiting(struct xe_vm *vm)
> > --
> > 2.26.3
> >

