[PATCH v2 3/3] drm/xe: Get page on user fence creation
Thomas Hellström
thomas.hellstrom at linux.intel.com
Fri Mar 1 06:36:01 UTC 2024
On Thu, 2024-02-29 at 19:55 -0800, Matthew Brost wrote:
> Attempt to get the page at user fence creation and kmap_local_page() it
> at signaling time. This should reduce latency and, unlike copy_to_user(),
> can ensure 64-bit atomicity.
>
> v2:
> - Prefault page and drop ref (Thomas)
> - Use set_page_dirty_lock (Thomas)
> - try_cmpxchg64 loop (Thomas)
>
> Signed-off-by: Matthew Brost <matthew.brost at intel.com>
> ---
> drivers/gpu/drm/xe/xe_sync.c | 52 +++++++++++++++++++++++++++++++-----
> 1 file changed, 45 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
> index c20e1f9ad267..bf7f22519cc5 100644
> --- a/drivers/gpu/drm/xe/xe_sync.c
> +++ b/drivers/gpu/drm/xe/xe_sync.c
> @@ -6,6 +6,7 @@
> #include "xe_sync.h"
>
> #include <linux/dma-fence-array.h>
> +#include <linux/highmem.h>
> #include <linux/kthread.h>
> #include <linux/sched/mm.h>
> #include <linux/uaccess.h>
> @@ -28,6 +29,7 @@ struct xe_user_fence {
> u64 __user *addr;
> u64 value;
> int signalled;
> + bool use_page;
> };
>
> static void user_fence_destroy(struct kref *kref)
> @@ -53,7 +55,9 @@ static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr,
> u64 value)
> {
> struct xe_user_fence *ufence;
> + struct page *page;
> u64 __user *ptr = u64_to_user_ptr(addr);
> + int ret;
>
> if (!access_ok(ptr, sizeof(ptr)))
> return ERR_PTR(-EFAULT);
> @@ -69,19 +73,53 @@ static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr,
> ufence->mm = current->mm;
> mmgrab(ufence->mm);
>
> + /* Prefault page */
> + ret = get_user_pages_fast(addr, 1, FOLL_WRITE, &page);
> + if (ret == 1) {
> + ufence->use_page = true;
> + put_page(page);
> + }
> +
> return ufence;
> }
>
> static void user_fence_worker(struct work_struct *w)
> {
> struct xe_user_fence *ufence = container_of(w, struct xe_user_fence, worker);
> -
> - if (mmget_not_zero(ufence->mm)) {
> - kthread_use_mm(ufence->mm);
> - if (copy_to_user(ufence->addr, &ufence->value, sizeof(ufence->value)))
> - XE_WARN_ON("Copy to user failed");
> - kthread_unuse_mm(ufence->mm);
> - mmput(ufence->mm);
> + struct mm_struct *mm = ufence->mm;
> +
> + if (mmget_not_zero(mm)) {
> + kthread_use_mm(mm);
> + if (ufence->use_page) {
> + struct page *page;
> + int ret;
> +
> + ret = get_user_pages_fast((unsigned long)ufence->addr,
> + 1, FOLL_WRITE, &page);
> + if (ret == 1) {
> + u64 *ptr;
> + u64 old = 0;
> + void *va;
> +
> + va = kmap_local_page(page);
> + ptr = va + offset_in_page(ufence->addr);
> + while (!try_cmpxchg64(ptr, &old, ufence->value))
> + continue;
> + kunmap_local(va);
> +
> + set_page_dirty_lock(page);
> + put_page(page);
> + } else {
> + ufence->use_page = false;
> + }
> + }
> + if (!ufence->use_page) {
Hmm. Trying to figure out the semantics here. If this is ever used on
32-bit and get_user_pages() fails, then I figure we can't guarantee
atomicity. That would typically happen if the user-fence sits in a
buffer object or in device memory?
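
To make the hazard concrete, a quick userspace-style sketch (hypothetical
values, little-endian assumed, not code from this patch): a 64-bit copy
performed as two 32-bit stores can briefly expose a value that is neither
the old nor the new one, so a polling waiter may misfire:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* u32[0] is the low word of u64 on little-endian. */
	union { uint64_t u64; uint32_t u32[2]; } fence =
		{ .u64 = 0xffffffff00000000ULL };	/* old value */
	uint64_t new_val = 0x00000000ffffffffULL;	/* value being signaled */

	fence.u32[0] = (uint32_t)new_val;	/* low word lands first */
	/*
	 * A waiter polling at this point reads 0xffffffffffffffff,
	 * which is neither the old nor the new value, so a compare
	 * against its wait value can spuriously match or miss.
	 */
	printf("torn value: 0x%016llx\n", (unsigned long long)fence.u64);
	fence.u32[1] = (uint32_t)(new_val >> 32);	/* high word follows */
	return 0;
}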
> + if (copy_to_user(ufence->addr, &ufence->value,
> + sizeof(ufence->value)))
We should probably use put_user() here. On 64-bit I think that always
translates to an atomic write. And IMO we should precede it with an mb()
to avoid in-kernel reordering. That would typically need to pair with an
mb() in the reader as well.
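
Roughly something like this on the kernel side (an untested sketch of
the fallback branch only, not the actual patch):

	/* Order prior signaling-related stores before the fence write. */
	mb();
	if (put_user(ufence->value, ufence->addr))
		drm_warn(&ufence->xe->drm, "Copy to user failed\n");

paired with the waiter, in pseudo-code (fence_addr / wait_value are
placeholder names):

	while (*(volatile uint64_t *)fence_addr != wait_value)
		;	/* poll */
	mb();	/* pairs with the kernel-side barrier above */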
> + drm_warn(&ufence->xe->drm, "Copy to user failed\n");
> + }
> + kthread_unuse_mm(mm);
> + mmput(mm);
> }
>
> wake_up_all(&ufence->xe->ufence_wq);