[v2 25/31] drm/xe/svm: Add vm to xe_svm process

Oak Zeng oak.zeng at intel.com
Tue Apr 9 20:17:36 UTC 2024


One shared virtual address space (xe_svm) works across the CPU
and multiple GPUs under one CPU process. Each xe_svm process can
have multiple gpu vms, for example, one gpu vm per gpu card. Add
the gpu vm to the current xe_svm process during xe_vm creation to
record that this gpu vm participates in the shared virtual address
space of the current CPU process, and remove the xe_vm from the
xe_svm on xe_vm destroy.
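
For reference, a minimal sketch of the linkage this patch sets up.
Only the fields actually touched by the diff below (mutex, vm_list,
svm_link) are taken from the patch; everything else is elided:

#include <linux/list.h>
#include <linux/mutex.h>

/* Sketch only: the real definitions live in xe_svm.h / xe_vm_types.h */
struct xe_svm {
	struct mutex mutex;        /* protects vm_list */
	struct list_head vm_list;  /* gpu vms sharing this CPU process's mm */
	/* ... per-process state such as the backing mm_struct ... */
};

struct xe_vm {
	/* ... existing vm state ... */
	struct list_head svm_link; /* links this vm into xe_svm::vm_list */
};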

FIXME: right now we blindly add every xe_vm to svm. Should we
introduce a uAPI to let the user decide which xe_vms participate
in svm?
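
To illustrate that question only, one possible opt-in shape is
sketched below; the flag name and bit value are hypothetical and
not part of this series:

#include <linux/bits.h>
#include <linux/types.h>
#include "xe_svm.h"	/* xe_svm_add_vm() */

/* Hypothetical uAPI opt-in; the flag name and bit value are made up. */
#define XE_VM_CREATE_FLAG_SVM_EXAMPLE	BIT(5)

static void xe_vm_maybe_join_svm(struct xe_vm *vm, u32 flags)
{
	/* Only join the CPU process's shared address space when asked to. */
	if (flags & XE_VM_CREATE_FLAG_SVM_EXAMPLE)
		xe_svm_add_vm(vm);
}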

Signed-off-by: Oak Zeng <oak.zeng at intel.com>
---
 drivers/gpu/drm/xe/xe_svm.c      | 45 ++++++++++++++++++++++++++++++++
 drivers/gpu/drm/xe/xe_svm.h      |  3 +++
 drivers/gpu/drm/xe/xe_vm.c       |  5 ++++
 drivers/gpu/drm/xe/xe_vm_types.h |  2 ++
 4 files changed, 55 insertions(+)

diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 416cfc81c053..1f4c2d32121a 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -8,6 +8,7 @@
 #include <linux/kernel.h>
 #include <linux/hashtable.h>
 #include "xe_svm.h"
+#include "xe_vm_types.h"
 
 #define XE_MAX_SVM_PROCESS 5 /* Maximumly support 32 SVM process*/
 DEFINE_HASHTABLE(xe_svm_table, XE_MAX_SVM_PROCESS);
@@ -75,3 +76,47 @@ struct xe_svm *xe_lookup_svm_by_mm(struct mm_struct *mm)
 
 	return NULL;
 }
+
+/**
+ * xe_svm_add_vm() - add a gpu vm to the current svm process
+ *
+ * @vm: The gpu vm to add to the current svm process.
+ *
+ * One shared virtual address space (xe_svm) works across CPU
+ * and multiple GPUs. So each xe_svm process can have N gpu
+ * vms, for example, one gpu vm per gpu card. This function
+ * adds a gpu vm to the current xe_svm process.
+ */
+void xe_svm_add_vm(struct xe_vm *vm)
+{
+	struct xe_svm *svm;
+
+	svm = xe_lookup_svm_by_mm(current->mm);
+	if (!svm)
+		svm = xe_create_svm();
+
+	mutex_lock(&svm->mutex);
+	list_add(&vm->svm_link, &svm->vm_list);
+	mutex_unlock(&svm->mutex);
+}
+
+/**
+ * xe_svm_remove_vm() - remove a gpu vm from svm process
+ *
+ * @vm: The gpu vm to remove from svm process.
+ */
+void xe_svm_remove_vm(struct xe_vm *vm)
+{
+	struct xe_svm *svm;
+
+	svm = xe_lookup_svm_by_mm(current->mm);
+	if (!svm)
+		return;
+
+	mutex_lock(&svm->mutex);
+	list_del(&vm->svm_link);
+	mutex_unlock(&svm->mutex);
+
+	if (list_empty(&svm->vm_list))
+		xe_destroy_svm(svm);
+}
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index 066740fb93f5..f601dffe3fc1 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -11,6 +11,7 @@
 #include "xe_device.h"
 #include "xe_assert.h"
 
+struct xe_vm;
 
 /**
  * struct xe_svm - data structure to represent a shared
@@ -33,6 +34,8 @@ struct xe_svm {
 extern struct xe_svm *xe_create_svm(void);
 void xe_destroy_svm(struct xe_svm *svm);
 extern struct xe_svm *xe_lookup_svm_by_mm(struct mm_struct *mm);
+void xe_svm_add_vm(struct xe_vm *vm);
+void xe_svm_remove_vm(struct xe_vm *vm);
 
 /**
  * xe_mem_region_pfn_to_dpa() - Calculate page's dpa from pfn
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 61d336f24a65..498b36469d00 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -40,6 +40,7 @@
 #include "xe_trace.h"
 #include "xe_wa.h"
 #include "xe_hmm.h"
+#include "xe_svm.h"
 
 static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
 {
@@ -1347,6 +1348,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
 	INIT_LIST_HEAD(&vm->userptr.repin_list);
 	INIT_LIST_HEAD(&vm->userptr.invalidated);
 	INIT_LIST_HEAD(&vm->userptr.fault_invalidated);
+	INIT_LIST_HEAD(&vm->svm_link);
 	init_rwsem(&vm->userptr.notifier_lock);
 	spin_lock_init(&vm->userptr.invalidated_lock);
 	INIT_WORK(&vm->userptr.garbage_collector, vm_userptr_garbage_collector);
@@ -1445,6 +1447,8 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
 		xe->usm.num_vm_in_non_fault_mode++;
 	mutex_unlock(&xe->usm.lock);
 
+	/* FIXME: should we add the vm to svm conditionally, e.g. per a uAPI flag? */
+	xe_svm_add_vm(vm);
 	trace_xe_vm_create(vm);
 
 	return vm;
@@ -1562,6 +1566,7 @@ void xe_vm_close_and_put(struct xe_vm *vm)
 	for_each_tile(tile, xe, id)
 		xe_range_fence_tree_fini(&vm->rftree[id]);
 
+	xe_svm_remove_vm(vm);
 	xe_vm_put(vm);
 }
 
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index d1f5949d4a3b..eb797195c374 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -394,6 +394,8 @@ struct xe_vm {
 	bool batch_invalidate_tlb;
 	/** @xef: XE file handle for tracking this VM's drm client */
 	struct xe_file *xef;
+	/** @svm_link: used to link this vm into xe_svm's vm_list */
+	struct list_head svm_link;
 };
 
 #endif
-- 
2.26.3


