[PATCH 3/3] drm/xe: Get page on user fence creation

Matthew Brost matthew.brost at intel.com
Tue Feb 27 02:43:37 UTC 2024


Attempt to pin the page backing the user fence address at creation time
and, on signaling, write the value with a single atomic xchg() through a
kmap_local_page() mapping. If the page cannot be pinned, fall back to
the existing copy_to_user() path. This should reduce signaling latency,
since the worker no longer has to attach to the user's mm, and it
guarantees the 64-bit write is atomic, which copy_to_user() does not.

Signed-off-by: Matthew Brost <matthew.brost at intel.com>
---
 drivers/gpu/drm/xe/xe_sync.c | 44 +++++++++++++++++++++++++++++++++++---------
 1 file changed, 35 insertions(+), 9 deletions(-)
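
For reviewers, a standalone sketch of the pattern the patch adopts,
under illustrative names (the example_* helpers are not part of the
patch, and the real code keeps a copy_to_user() fallback):

	#include <linux/atomic.h>
	#include <linux/highmem.h>
	#include <linux/mm.h>

	/* Pin the single page backing a u64 user address so it can be
	 * written later without the user's mm. Returns 0 on success; on
	 * failure the caller falls back to copy_to_user(). */
	static int example_pin_fence_page(unsigned long addr, struct page **page)
	{
		int ret = get_user_pages_fast(addr, 1, FOLL_WRITE, page);

		if (ret == 1)
			return 0;
		return ret < 0 ? ret : -EFAULT;
	}

	/* Signal by storing the value through a short-lived kernel
	 * mapping. xchg() is a single atomic 64-bit store (addr is
	 * assumed 8-byte aligned), so a waiter polling the address can
	 * never observe a torn value. */
	static void example_signal_fence(struct page *page, unsigned long addr,
					 u64 value)
	{
		void *va = kmap_local_page(page);

		xchg((u64 *)(va + offset_in_page(addr)), value);
		kunmap_local(va);

		set_page_dirty(page);	/* written via the kernel alias */
		put_page(page);		/* drop the pin taken at creation */
	}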

diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
index 022e158d28d9..2f3e062c0101 100644
--- a/drivers/gpu/drm/xe/xe_sync.c
+++ b/drivers/gpu/drm/xe/xe_sync.c
@@ -6,6 +6,7 @@
 #include "xe_sync.h"
 
 #include <linux/dma-fence-array.h>
+#include <linux/highmem.h>
 #include <linux/kthread.h>
 #include <linux/sched/mm.h>
 #include <linux/uaccess.h>
@@ -25,6 +26,7 @@ struct user_fence {
 	struct dma_fence_cb cb;
 	struct work_struct worker;
 	struct mm_struct *mm;
+	struct page *page;
 	u64 __user *addr;
 	u64 value;
 };
@@ -34,7 +36,10 @@ static void user_fence_destroy(struct kref *kref)
 	struct user_fence *ufence = container_of(kref, struct user_fence,
 						 refcount);
 
-	mmdrop(ufence->mm);
+	if (ufence->page)
+		put_page(ufence->page);
+	else if (ufence->mm)
+		mmdrop(ufence->mm);
 	kfree(ufence);
 }
 
@@ -53,6 +58,7 @@ static struct user_fence *user_fence_create(struct xe_device *xe, u64 addr,
 {
 	struct user_fence *ufence;
 	u64 __user *ptr = u64_to_user_ptr(addr);
+	int ret;
 
 	if (!access_ok(ptr, sizeof(ptr)))
 		return ERR_PTR(-EFAULT);
@@ -66,7 +72,11 @@ static struct user_fence *user_fence_create(struct xe_device *xe, u64 addr,
 	ufence->addr = ptr;
 	ufence->value = value;
 	ufence->mm = current->mm;
-	mmgrab(ufence->mm);
+	ret = get_user_pages_fast(addr, 1, FOLL_WRITE, &ufence->page);
+	if (ret != 1) {
+		mmgrab(ufence->mm);
+		ufence->page = NULL;
+	}
 
 	return ufence;
 }
@@ -74,13 +84,29 @@ static struct user_fence *user_fence_create(struct xe_device *xe, u64 addr,
 static void user_fence_worker(struct work_struct *w)
 {
 	struct user_fence *ufence = container_of(w, struct user_fence, worker);
-
-	if (mmget_not_zero(ufence->mm)) {
-		kthread_use_mm(ufence->mm);
-		if (copy_to_user(ufence->addr, &ufence->value, sizeof(ufence->value)))
-			XE_WARN_ON("Copy to user failed");
-		kthread_unuse_mm(ufence->mm);
-		mmput(ufence->mm);
+	struct mm_struct *mm = ufence->mm;
+
+	if (ufence->page) {
+		u64 *ptr;
+		void *va;
+
+		/* The page is pinned, so the write does not need the mm. */
+		va = kmap_local_page(ufence->page);
+		ptr = va + offset_in_page(ufence->addr);
+		xchg(ptr, ufence->value);
+		kunmap_local(va);
+
+		set_page_dirty(ufence->page);
+		put_page(ufence->page);
+		ufence->page = NULL;
+		ufence->mm = NULL;
+	} else if (mmget_not_zero(mm)) {
+		kthread_use_mm(mm);
+		if (copy_to_user(ufence->addr, &ufence->value,
+				 sizeof(ufence->value)))
+			drm_warn(&ufence->xe->drm, "Copy to user failed\n");
+		kthread_unuse_mm(mm);
+		mmput(mm);
 	}
 
 	wake_up_all(&ufence->xe->ufence_wq);
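
For context on the atomicity claim above: a userspace waiter typically
polls the fence address, e.g. (illustrative, not part of the patch):

	#include <stdatomic.h>
	#include <stdint.h>

	/* Spin until the fence holds the expected value. If the kernel
	 * wrote the value byte by byte (as copy_to_user() may), this
	 * load could observe a mixture of old and new bytes; a single
	 * atomic 64-bit store on the kernel side rules that out. */
	static void example_wait_fence(const _Atomic uint64_t *addr,
				       uint64_t value)
	{
		while (atomic_load_explicit(addr, memory_order_acquire) != value)
			;
	}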
-- 
2.34.1


