[CI 09/44] drm/svm: Mark drm_gpuvm to participate in SVM
Oak Zeng
oak.zeng at intel.com
Fri Jun 14 21:57:42 UTC 2024
Add an mm_struct field to drm_gpuvm, and add a parameter to
drm_gpuvm_init indicating whether this gpuvm participates in SVM
(shared virtual memory with the CPU process). Under SVM, the CPU
program and the GPU program share one process virtual address space.
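For illustration only (not part of this patch), here is a minimal
sketch of how an SVM-capable driver might call the updated
drm_gpuvm_init(). The my_device/my_vm/my_gpuvm_ops names are made up;
the call mirrors the xe call site changed below, but passes true:

	/*
	 * Hypothetical call site: passing participate_svm = true makes
	 * drm_gpuvm_init() record current->mm, i.e. the mm of the
	 * process issuing the VM-create ioctl, in gpuvm->mm.  Note
	 * that no reference on the mm is taken here.
	 */
	static void my_vm_init(struct my_device *mdev, struct my_vm *vm,
			       struct drm_gem_object *resv_obj)
	{
		drm_gpuvm_init(&vm->gpuvm, "My SVM VM",
			       DRM_GPUVM_RESV_PROTECTED, &mdev->drm,
			       resv_obj, 0, vm->size, 0, 0,
			       &my_gpuvm_ops, true /* participate_svm */);
	}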
Cc: Dave Airlie <airlied at redhat.com>
Cc: Daniel Vetter <daniel at ffwll.ch>
Cc: Thomas Hellström <thomas.hellstrom at linux.intel.com>
Cc: Felix Kuehling <felix.kuehling at amd.com>
Cc: Jason Gunthorpe <jgg at nvidia.com>
Cc: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
Cc: Matthew Brost <matthew.brost at intel.com>
Cc: Brian Welty <brian.welty at intel.com>
Cc: <dri-devel at lists.freedesktop.org>
Suggested-by: Christian König <christian.koenig at amd.com>
Signed-off-by: Oak Zeng <oak.zeng at intel.com>
---
drivers/gpu/drm/drm_gpuvm.c | 7 ++++++-
drivers/gpu/drm/nouveau/nouveau_uvmm.c | 2 +-
drivers/gpu/drm/xe/xe_vm.c | 2 +-
include/drm/drm_gpuvm.h | 8 +++++++-
4 files changed, 15 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index 7402ed6f1d33..5f246ca472a8 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -984,6 +984,8 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_resv_object_alloc);
* @reserve_offset: the start of the kernel reserved GPU VA area
* @reserve_range: the size of the kernel reserved GPU VA area
* @ops: &drm_gpuvm_ops called on &drm_gpuvm_sm_map / &drm_gpuvm_sm_unmap
+ * @participate_svm: whether this gpuvm participates in shared virtual
+ * memory (SVM) with the CPU mm
*
* The &drm_gpuvm must be initialized with this function before use.
*
@@ -997,7 +999,7 @@ drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
struct drm_gem_object *r_obj,
u64 start_offset, u64 range,
u64 reserve_offset, u64 reserve_range,
- const struct drm_gpuvm_ops *ops)
+ const struct drm_gpuvm_ops *ops, bool participate_svm)
{
gpuvm->rb.tree = RB_ROOT_CACHED;
INIT_LIST_HEAD(&gpuvm->rb.list);
@@ -1016,6 +1018,9 @@ drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
gpuvm->drm = drm;
gpuvm->r_obj = r_obj;
+ if (participate_svm)
+ gpuvm->mm = current->mm;
+
drm_gem_object_get(r_obj);
drm_gpuvm_warn_check_overflow(gpuvm, start_offset, range);
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index ee02cd833c5e..0d11f1733e29 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -1861,7 +1861,7 @@ nouveau_uvmm_ioctl_vm_init(struct drm_device *dev,
NOUVEAU_VA_SPACE_END,
init->kernel_managed_addr,
init->kernel_managed_size,
- &gpuvm_ops);
+ &gpuvm_ops, false);
/* GPUVM takes care from here on. */
drm_gem_object_put(r_obj);
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index ffda487653d8..bcb0a38b31ae 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1363,7 +1363,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
}
drm_gpuvm_init(&vm->gpuvm, "Xe VM", DRM_GPUVM_RESV_PROTECTED, &xe->drm,
- vm_resv_obj, 0, vm->size, 0, 0, &gpuvm_ops);
+ vm_resv_obj, 0, vm->size, 0, 0, &gpuvm_ops, false);
drm_gem_object_put(vm_resv_obj);
diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
index 429dc0d82eba..838dd7137f07 100644
--- a/include/drm/drm_gpuvm.h
+++ b/include/drm/drm_gpuvm.h
@@ -242,6 +242,12 @@ struct drm_gpuvm {
* @drm: the &drm_device this VM lives in
*/
struct drm_device *drm;
+ /**
+ * @mm: the process &mm_struct which created this gpuvm.
+ * This is only used for shared virtual memory, where the virtual
+ * address space is shared between the CPU and GPU program.
+ */
+ struct mm_struct *mm;
/**
* @mm_start: start of the VA space
@@ -342,7 +348,7 @@ void drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
struct drm_gem_object *r_obj,
u64 start_offset, u64 range,
u64 reserve_offset, u64 reserve_range,
- const struct drm_gpuvm_ops *ops);
+ const struct drm_gpuvm_ops *ops, bool participate_svm);
/**
* drm_gpuvm_get() - acquire a struct drm_gpuvm reference
--
2.26.3
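For completeness, a hypothetical sketch (not part of this patch) of how
SVM code might later consult gpuvm->mm. The helper name is made up, and
the lifetime handling is an assumption, since the patch only stores the
raw pointer without taking a reference on the mm:

	/*
	 * Hypothetical consumer of gpuvm->mm.  A NULL mm means this
	 * gpuvm does not participate in SVM.  mmget_not_zero() guards
	 * against the owning process having already exited.
	 */
	static int my_svm_check_cpu_vma(struct drm_gpuvm *gpuvm,
					unsigned long addr)
	{
		struct mm_struct *mm = gpuvm->mm;
		struct vm_area_struct *vma;
		int ret = 0;

		if (!mm)
			return -EINVAL;
		if (!mmget_not_zero(mm))
			return -ESRCH;

		mmap_read_lock(mm);
		vma = find_vma(mm, addr);
		if (!vma || addr < vma->vm_start)
			ret = -EFAULT;
		/* hmm_range_fault() / migration would go here */
		mmap_read_unlock(mm);
		mmput(mm);

		return ret;
	}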