[Intel-xe] [PATCH 3/4] drm/xe/vm: Perform accounting of userptr pinned pages

Thomas Hellström thomas.hellstrom at linux.intel.com
Fri Aug 18 15:08:44 UTC 2023


Account pinned userptr pages against RLIMIT_MEMLOCK, following how RDMA
does this, with CAP_IPC_LOCK allowing the limit to be bypassed.
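
For reference, a minimal sketch of the RDMA-style accounting pattern being
mirrored here (the function name below is illustrative only, not part of
this patch):

  /*
   * Sketch of RLIMIT_MEMLOCK accounting against mm->pinned_vm, as done
   * by RDMA drivers. Illustrative only; see the actual patch below.
   */
  static int sketch_account_pinned(struct mm_struct *mm, unsigned long npages)
  {
  	unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

  	if (!can_do_mlock())
  		return -EPERM;

  	/* Charge first; back out if over the limit without CAP_IPC_LOCK. */
  	if (atomic64_add_return(npages, &mm->pinned_vm) > limit &&
  	    !capable(CAP_IPC_LOCK)) {
  		atomic64_sub(npages, &mm->pinned_vm);
  		return -ENOMEM;
  	}

  	return 0;
  }

Charging pinned_vm before the limit check and subtracting it back on
failure avoids concurrent pinners racing past the limit; the unpin path
simply subtracts the same page count.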

Signed-off-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
---
 drivers/gpu/drm/xe/xe_vm.c | 43 ++++++++++++++++++++++++++++++++++++--
 1 file changed, 41 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index ecbcad696b60..d9c000689002 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -34,6 +34,33 @@
 
 #define TEST_VM_ASYNC_OPS_ERROR
 
+/*
+ * Perform userptr PIN accounting against RLIMIT_MEMLOCK for now, similarly
+ * to how RDMA does this.
+ */
+static int xe_vma_mlock_alloc(struct xe_vma *vma, unsigned long num_pages)
+{
+	unsigned long lock_limit, new_pinned;
+	struct mm_struct *mm = vma->userptr.notifier.mm;
+
+	if (!can_do_mlock())
+		return -EPERM;
+
+	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+	new_pinned = atomic64_add_return(num_pages, &mm->pinned_vm);
+	if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
+		atomic64_sub(num_pages, &mm->pinned_vm);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void xe_vma_mlock_free(struct xe_vma *vma, unsigned long num_pages)
+{
+	atomic64_sub(num_pages, &vma->userptr.notifier.mm->pinned_vm);
+}
+
 /**
  * xe_vma_userptr_check_repin() - Advisory check for repin needed
  * @vma: The userptr vma
@@ -89,9 +116,17 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
 					    !read_only);
 		pages = vma->userptr.pinned_pages;
 	} else {
+		if (xe_vma_is_pinned(vma)) {
+			ret = xe_vma_mlock_alloc(vma, num_pages);
+			if (ret)
+				return ret;
+		}
+
 		pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
-		if (!pages)
-			return -ENOMEM;
+		if (!pages) {
+			ret = -ENOMEM;
+			goto out_account;
+		}
 	}
 
 	pinned = ret = 0;
@@ -187,6 +222,9 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
 mm_closed:
 	kvfree(pages);
 	vma->userptr.pinned_pages = NULL;
+out_account:
+	if (xe_vma_is_pinned(vma))
+		xe_vma_mlock_free(vma, num_pages);
 	return ret;
 }
 
@@ -1004,6 +1042,7 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
 			unpin_user_pages_dirty_lock(vma->userptr.pinned_pages,
 						    vma->userptr.num_pinned,
 						    !read_only);
+			xe_vma_mlock_free(vma, xe_vma_size(vma) >> PAGE_SHIFT);
 			kvfree(vma->userptr.pinned_pages);
 		}
 
-- 
2.41.0


