[PATCH v2 3/3] drm/xe: Get page on user fence creation

Thomas Hellström thomas.hellstrom at linux.intel.com
Fri Mar 1 13:31:47 UTC 2024


On Fri, 2024-03-01 at 09:56 +0100, Thomas Hellström wrote:
> On Fri, 2024-03-01 at 07:46 +0000, Matthew Brost wrote:
> > On Fri, Mar 01, 2024 at 07:36:01AM +0100, Thomas Hellström wrote:
> > > On Thu, 2024-02-29 at 19:55 -0800, Matthew Brost wrote:
> > > > Attempt to get page on user fence creation and kmap_local_page on
> > > > signaling. Should reduce latency and can ensure 64 bit atomicity
> > > > compared to copy_to_user.
> > > > 
> > > > v2:
> > > >  - Prefault page and drop ref (Thomas)
> > > >  - Use set_page_dirty_lock (Thomas)
> > > >  - try_cmpxchg64 loop (Thomas)
> > > > 
> > > > Signed-off-by: Matthew Brost <matthew.brost at intel.com>
> > > > ---
> > > >  drivers/gpu/drm/xe/xe_sync.c | 52 +++++++++++++++++++++++++++++++-----
> > > >  1 file changed, 45 insertions(+), 7 deletions(-)
> > > > 
> > > > diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
> > > > index c20e1f9ad267..bf7f22519cc5 100644
> > > > --- a/drivers/gpu/drm/xe/xe_sync.c
> > > > +++ b/drivers/gpu/drm/xe/xe_sync.c
> > > > @@ -6,6 +6,7 @@
> > > >  #include "xe_sync.h"
> > > >  
> > > >  #include <linux/dma-fence-array.h>
> > > > +#include <linux/highmem.h>
> > > >  #include <linux/kthread.h>
> > > >  #include <linux/sched/mm.h>
> > > >  #include <linux/uaccess.h>
> > > > @@ -28,6 +29,7 @@ struct xe_user_fence {
> > > >  	u64 __user *addr;
> > > >  	u64 value;
> > > >  	int signalled;
> > > > +	bool use_page;
> > > >  };
> > > >  
> > > >  static void user_fence_destroy(struct kref *kref)
> > > > @@ -53,7 +55,9 @@ static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr,
> > > >  					       u64 value)
> > > >  {
> > > >  	struct xe_user_fence *ufence;
> > > > +	struct page *page;
> > > >  	u64 __user *ptr = u64_to_user_ptr(addr);
> > > > +	int ret;
> > > >  
> > > >  	if (!access_ok(ptr, sizeof(ptr)))
> > > >  		return ERR_PTR(-EFAULT);
> > > > @@ -69,19 +73,53 @@ static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr,
> > > >  	ufence->mm = current->mm;
> > > >  	mmgrab(ufence->mm);
> > > >  
> > > > +	/* Prefault page */
> > > > +	ret = get_user_pages_fast(addr, 1, FOLL_WRITE, &page);
> > > > +	if (ret == 1) {
> > > > +		ufence->use_page = true;
> > > > +		put_page(page);
> > > > +	}
> > > > +
> > > >  	return ufence;
> > > >  }
> > > >  
> > > >  static void user_fence_worker(struct work_struct *w)
> > > >  {
> > > >  	struct xe_user_fence *ufence = container_of(w, struct xe_user_fence, worker);
> > > > -
> > > > -	if (mmget_not_zero(ufence->mm)) {
> > > > -		kthread_use_mm(ufence->mm);
> > > > -	if (copy_to_user(ufence->addr, &ufence->value, sizeof(ufence->value)))
> > > > -			XE_WARN_ON("Copy to user failed");
> > > > -		kthread_unuse_mm(ufence->mm);
> > > > -		mmput(ufence->mm);
> > > > +	struct mm_struct *mm = ufence->mm;
> > > > +
> > > > +	if (mmget_not_zero(mm)) {
> > > > +		kthread_use_mm(mm);
> > > > +		if (ufence->use_page) {
> > > > +			struct page *page;
> > > > +			int ret;
> > > > +
> > > > +			ret = get_user_pages_fast((unsigned long)ufence->addr,
> > > > +						  1, FOLL_WRITE, &page);
> > > > +			if (ret == 1) {
> > > > +				atomic64_t *ptr;
> > > > +				u64 old = 0;
> > > > +				void *va;
> > > > +
> > > > +				va = kmap_local_page(page);
> > > > +				ptr = va + offset_in_page(ufence->addr);
> > > > +				while (!try_cmpxchg64(ptr, &old, ufence->value))
> > > > +					continue;
> 
> I'm still a little worried about the availability of this, for example
> when the build bots test on all supported architectures after Linus
> has already pulled the stuff. It's definitely there on i386, and it
> seems to be used generically in sched/clock.c. Might be worthwhile to
> CC dri-devel/lkml and have the build bots pick it up...
> 
> 
> > > > +				kunmap_local(va);
> > > > +
> > > > +				set_page_dirty_lock(page);
> > > > +				put_page(page);
> > > > +			} else {
> > > > +				ufence->use_page = false;
> > > > +			}
> > > > +		}
> > > > +		if (!ufence->use_page) {
> > > 
> > > Hmm. Trying to figure out the semantics here. If ever used on
> > > 32-bit, and get_user_pages() fails, then I figure we can't
> > > guarantee atomicity. That would typically be if the user-fence is
> > > in buffer-object or device memory?
> > > 
> > 
> > I think so. Based on [1], if the ufence is a mapped BO on TGL,
> > get_user_pages_fast doesn't work and the !use_page path is used.
> > Hence I added the malloc'd ufence section in [1].
> > 
> > [1]
> > https://patchwork.freedesktop.org/patch/580147/?series=130417&rev=1
> > 
> > > > +			if (copy_to_user(ufence->addr, &ufence->value,
> > > > +					 sizeof(ufence->value)))
> > > 
> > > We should probably use put_user() here. On 64-bit I think that
> > > always translates to an atomic write. And we should IMO precede it
> > > with an mb() to avoid in-kernel reordering. That would typically
> > > need to pair with an mb() in the reader as well.
> > > 
> > 
> > Got it on the put_user(), seems to work.
> > 
> > A little unclear on mb() usage.
> > 
> > Would it be?
> > mb()
> > put_user()
> 
> Yes, this is correct. Actually we'd want smp_store_release() semantics
> here, but this is stricter.
> 
> 
> > 
> > And then in xe_wait_user_fence.c:do_compare?
> > mb()
> > copy_from_user
> 
> Here we'd want smp_load_acquire() but we'd have to make do with the below.
> get_user()
> mb();

Oh, and actually smp_mb() should be sufficient here.
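
Roughly the write/read pairing I have in mind, just as a sketch (untested;
the do_compare() side is simplified and the variable names are illustrative,
not the actual ones):

	/* Signal side, the !use_page fallback in user_fence_worker():
	 * order all prior writes before the fence value becomes visible,
	 * i.e. release semantics, here with a stricter full barrier.
	 */
	mb();
	if (put_user(ufence->value, ufence->addr))
		drm_warn(&ufence->xe->drm, "Copy to user failed\n");

	/* Wait side, xe_wait_user_fence.c:do_compare(), simplified:
	 * read the fence value first, then order all subsequent reads
	 * after it, i.e. acquire semantics, again with a full barrier.
	 */
	u64 rvalue;

	if (get_user(rvalue, u64_to_user_ptr(addr)))
		return -EFAULT;
	smp_mb();

User-space pollers reading the fence from another thread would need the
matching barrier after their load as well, as mentioned below.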


> 
> And user-space should use a similar mb() as well if they need to be
> sure things are indeed done after the signalling.
> 
> /Thomas
> 
> > 
> > Matt
> > 
> > > 
> > > > +				drm_warn(&ufence->xe->drm, "Copy to user failed\n");
> > > > +		}
> > > > +		kthread_unuse_mm(mm);
> > > > +		mmput(mm);
> > > >  	}
> > > >  
> > > >  	wake_up_all(&ufence->xe->ufence_wq);
> > > 
> 


