[PATCH 3/3] drm/xe: Get page on user fence creation

Thomas Hellström thomas.hellstrom at linux.intel.com
Tue Feb 27 11:01:27 UTC 2024


On Mon, 2024-02-26 at 18:43 -0800, Matthew Brost wrote:
> Attempt to get the page at user fence creation and use
> kmap_local_page() at signaling time. This should reduce latency and
> can ensure 64-bit atomicity, compared to copy_to_user().
> 
> Signed-off-by: Matthew Brost <matthew.brost at intel.com>
> ---
>  drivers/gpu/drm/xe/xe_sync.c | 45 ++++++++++++++++++++++++++++--------
>  1 file changed, 36 insertions(+), 9 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
> index 022e158d28d9..2f3e062c0101 100644
> --- a/drivers/gpu/drm/xe/xe_sync.c
> +++ b/drivers/gpu/drm/xe/xe_sync.c
> @@ -6,6 +6,7 @@
>  #include "xe_sync.h"
>  
>  #include <linux/dma-fence-array.h>
> +#include <linux/highmem.h>
>  #include <linux/kthread.h>
>  #include <linux/sched/mm.h>
>  #include <linux/uaccess.h>
> @@ -25,6 +26,7 @@ struct user_fence {
>  	struct dma_fence_cb cb;
>  	struct work_struct worker;
>  	struct mm_struct *mm;
> +	struct page *page;
>  	u64 __user *addr;
>  	u64 value;
>  };
> @@ -34,7 +36,10 @@ static void user_fence_destroy(struct kref *kref)
>  	struct user_fence *ufence = container_of(kref, struct user_fence,
>  						 refcount);
>  
> -	mmdrop(ufence->mm);
> +	if (ufence->page)
> +		put_page(ufence->page);
> +	else if (ufence->mm)
> +		mmdrop(ufence->mm);
>  	kfree(ufence);
>  }
>  
> @@ -53,6 +58,7 @@ static struct user_fence *user_fence_create(struct xe_device *xe, u64 addr,
>  {
>  	struct user_fence *ufence;
>  	u64 __user *ptr = u64_to_user_ptr(addr);
> +	int ret;
>  
>  	if (!access_ok(ptr, sizeof(ptr)))
>  		return ERR_PTR(-EFAULT);
> @@ -66,7 +72,11 @@ static struct user_fence *user_fence_create(struct xe_device *xe, u64 addr,
>  	ufence->addr = ptr;
>  	ufence->value = value;
>  	ufence->mm = current->mm;
> -	mmgrab(ufence->mm);
> +	ret = get_user_pages_fast(addr, 1, FOLL_WRITE, &ufence->page);

Hmm. This is mid-term pinning a page-cache page. We shouldn't really do
that, since it interferes with huge pages and NUMA migration.

What about just prefaulting and dropping the refcount here, and then
doing the lookup again when signalling?
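Roughly something like this on the creation side (untested sketch,
reusing the names from the patch; error handling elided):

	struct page *page;

	/* Prefault the page writable, but don't keep a reference around. */
	if (get_user_pages_fast(addr, 1, FOLL_WRITE, &page) == 1)
		put_page(page);

	ufence->mm = current->mm;
	mmgrab(ufence->mm);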

> +	if (ret != 1) {
> +		mmgrab(ufence->mm);
> +		ufence->page = NULL;
> +	}
>  
>  	return ufence;
>  }
> @@ -74,13 +84,30 @@ static struct user_fence
> *user_fence_create(struct xe_device *xe, u64 addr,
>  static void user_fence_worker(struct work_struct *w)
>  {
>  	struct user_fence *ufence = container_of(w, struct user_fence, worker);
> -
> -	if (mmget_not_zero(ufence->mm)) {
> -		kthread_use_mm(ufence->mm);
> -		if (copy_to_user(ufence->addr, &ufence->value, sizeof(ufence->value)))
> -			XE_WARN_ON("Copy to user failed");
> -		kthread_unuse_mm(ufence->mm);
> -		mmput(ufence->mm);
> +	struct mm_struct *mm = ufence->mm;
> +
> +	if (mmget_not_zero(mm)) {
> +		if (ufence->page) {
> +			u64 *ptr;
> +			void *va;
> +
> +			va = kmap_local_page(ufence->page);
> +			ptr = va + offset_in_page(ufence->addr);
> +			xchg(ptr, ufence->value);

Does this compile on 32-bit? IIRC xchg() on a 64-bit quantity doesn't
build on most 32-bit architectures.
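If the 64-bit single-store semantics only matter on 64-bit kernels,
a guard along these lines would keep the build working elsewhere
(untested sketch; note the 32-bit path is then no longer tear-free):

#ifdef CONFIG_64BIT
	xchg(ptr, ufence->value);
#else
	/* Plain store; userspace could in principle observe tearing. */
	WRITE_ONCE(*ptr, ufence->value);
#endif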

> +			kunmap_local(va);
> +
> +			set_page_dirty(ufence->page);

I think set_page_dirty_lock() should be used here, since we don't hold
the page lock at this point.

> +			put_page(ufence->page);
> +			ufence->page = NULL;
> +			ufence->mm = NULL;
> +		} else {
> +			kthread_use_mm(mm);

So we could do the whole thing here instead, including a
get_user_pages_fast(). Typically that would be a lock-free fast lookup
unless the page got migrated between creation and signalling.
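Untested sketch of that variant, reusing the names from the patch (the
page reference is then only held across the write itself, with a
copy_to_user() fallback if the fast lookup fails):

static void user_fence_worker(struct work_struct *w)
{
	struct user_fence *ufence = container_of(w, struct user_fence, worker);
	struct mm_struct *mm = ufence->mm;

	if (mmget_not_zero(mm)) {
		struct page *page;

		/* GUP-fast operates on current->mm, so adopt the mm first. */
		kthread_use_mm(mm);
		if (get_user_pages_fast((unsigned long)ufence->addr, 1,
					FOLL_WRITE, &page) == 1) {
			void *va = kmap_local_page(page);
			u64 *ptr = va + offset_in_page(ufence->addr);

			xchg(ptr, ufence->value);
			kunmap_local(va);
			set_page_dirty_lock(page);
			put_page(page);
		} else if (copy_to_user(ufence->addr, &ufence->value,
					sizeof(ufence->value))) {
			drm_warn(&ufence->xe->drm, "Copy to user failed\n");
		}
		kthread_unuse_mm(mm);
		mmput(mm);
	}

	wake_up_all(&ufence->xe->ufence_wq);
}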


> +			if (copy_to_user(ufence->addr, &ufence->value,
> +					 sizeof(ufence->value)))
> +				drm_warn(&ufence->xe->drm, "Copy to user failed\n");
> +			kthread_unuse_mm(mm);
> +		}
> +		mmput(mm);
>  	}
>  
>  	wake_up_all(&ufence->xe->ufence_wq);


