[Intel-gfx] [RFC v2 02/12] drm/i915/svm: Runtime (RT) allocator support

Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Fri Dec 13 21:56:04 UTC 2019


Shared Virtual Memory (SVM) runtime allocator support allows binding
a shared virtual address to a buffer object (BO) in the device page
table through the new DRM_I915_GEM_VM_BIND ioctl. Objects bound to a
VM this way are tracked on a per-VM list and are implicitly added to
the execbuffer object list as pinned mappings, so userspace does not
have to pass them with every submission.
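
A minimal sketch of how userspace might use the new ioctl, using
libdrm's drmIoctl() and the definitions added to i915_drm.h below
(the fd, vm_id and bo_handle variables and the chosen virtual address
are illustrative assumptions; error handling is omitted):

	struct drm_i915_gem_vm_bind bind = {
		.vm_id  = vm_id,      /* from DRM_IOCTL_I915_GEM_VM_CREATE */
		.handle = bo_handle,  /* GEM handle of the BO to bind */
		.start  = 0x10000000, /* shared virtual address for the BO */
		.type   = I915_GEM_VM_BIND_SVM_OBJ,
		.flags  = 0,          /* or I915_GEM_VM_BIND_READONLY */
	};

	/* Bind the BO at 'start' in the VM's device page table */
	drmIoctl(fd, DRM_IOCTL_I915_GEM_VM_BIND, &bind);

	/* Later, remove the binding of this BO from the VM */
	bind.flags = I915_GEM_VM_BIND_UNBIND;
	drmIoctl(fd, DRM_IOCTL_I915_GEM_VM_BIND, &bind);

Note that on discrete GPUs the bind path requires the BO to already
reside in device local memory (see i915_gem_vm_bind_svm_obj() below).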

Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Jon Bloomfield <jon.bloomfield@intel.com>
Cc: Daniel Vetter <daniel.vetter@intel.com>
Cc: Sudeep Dutt <sudeep.dutt@intel.com>
Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
---
 drivers/gpu/drm/i915/Kconfig                  | 11 ++++
 drivers/gpu/drm/i915/Makefile                 |  3 +
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    | 58 ++++++++++++++----
 drivers/gpu/drm/i915/gem/i915_gem_svm.c       | 62 +++++++++++++++++++
 drivers/gpu/drm/i915/gem/i915_gem_svm.h       | 22 +++++++
 drivers/gpu/drm/i915/i915_drv.c               | 21 +++++++
 drivers/gpu/drm/i915/i915_drv.h               | 22 +++++++
 drivers/gpu/drm/i915/i915_gem_gtt.c           |  1 +
 drivers/gpu/drm/i915/i915_gem_gtt.h           | 13 ++++
 include/uapi/drm/i915_drm.h                   | 27 +++++++++
 10 files changed, 229 insertions(+), 11 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_svm.c
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_svm.h

diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index ba9595960bbe..c2e48710eec8 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -137,6 +137,16 @@ config DRM_I915_GVT_KVMGT
 	  Choose this option if you want to enable KVMGT support for
 	  Intel GVT-g.
 
+config DRM_I915_SVM
+	bool "Enable Shared Virtual Memory support in i915"
+	depends on STAGING
+	depends on DRM_I915
+	default n
+	help
+	  Choose this option if you want Shared Virtual Memory (SVM)
+	  support in i915. With SVM support, a process can share its
+	  virtual address space with the GPU.
+
 menu "drm/i915 Debugging"
 depends on DRM_I915
 depends on EXPERT
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index e0fd10c0cfb8..75fe45633779 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -153,6 +153,9 @@ i915-y += \
 	  intel_region_lmem.o \
 	  intel_wopcm.o
 
+# SVM code
+i915-$(CONFIG_DRM_I915_SVM) += gem/i915_gem_svm.o
+
 # general-purpose microcontroller (GuC) support
 obj-y += gt/uc/
 i915-y += gt/uc/intel_uc.o \
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 5003e616a1ad..af360238a392 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -2836,10 +2836,14 @@ int
 i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
 			   struct drm_file *file)
 {
+	struct drm_i915_gem_exec_object2 *exec2_list, *exec2_list_user;
 	struct drm_i915_gem_execbuffer2 *args = data;
-	struct drm_i915_gem_exec_object2 *exec2_list;
-	struct drm_syncobj **fences = NULL;
 	const size_t count = args->buffer_count;
+	struct drm_syncobj **fences = NULL;
+	unsigned int i = 0, svm_count = 0;
+	struct i915_address_space *vm;
+	struct i915_gem_context *ctx;
+	struct i915_svm_obj *svm_obj;
 	int err;
 
 	if (!check_buffer_count(count)) {
@@ -2851,15 +2855,46 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
 	if (err)
 		return err;
 
+	ctx = i915_gem_context_lookup(file->driver_priv, args->rsvd1);
+	if (!ctx || !rcu_access_pointer(ctx->vm))
+		return -ENOENT;
+
+	rcu_read_lock();
+	vm = i915_vm_get(ctx->vm);
+	rcu_read_unlock();
+
+alloc_again:
+	svm_count = vm->svm_count;
 	/* Allocate an extra slot for use by the command parser */
-	exec2_list = kvmalloc_array(count + 1, eb_element_size(),
+	exec2_list = kvmalloc_array(count + svm_count + 1, eb_element_size(),
 				    __GFP_NOWARN | GFP_KERNEL);
 	if (exec2_list == NULL) {
 		DRM_DEBUG("Failed to allocate exec list for %zd buffers\n",
-			  count);
+			  count + svm_count);
 		return -ENOMEM;
 	}
-	if (copy_from_user(exec2_list,
+	mutex_lock(&vm->mutex);
+	if (svm_count != vm->svm_count) {
+		mutex_unlock(&vm->mutex);
+		kvfree(exec2_list);
+		goto alloc_again;
+	}
+
+	list_for_each_entry(svm_obj, &vm->svm_list, link) {
+		memset(&exec2_list[i], 0, sizeof(*exec2_list));
+		exec2_list[i].handle = svm_obj->handle;
+		exec2_list[i].offset = svm_obj->offset;
+		exec2_list[i].flags = EXEC_OBJECT_PINNED |
+				      EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
+		i++;
+	}
+	exec2_list_user = &exec2_list[i];
+	args->buffer_count += svm_count;
+	mutex_unlock(&vm->mutex);
+	i915_vm_put(vm);
+	i915_gem_context_put(ctx);
+
+	if (copy_from_user(exec2_list_user,
 			   u64_to_user_ptr(args->buffers_ptr),
 			   sizeof(*exec2_list) * count)) {
 		DRM_DEBUG("copy %zd exec entries failed\n", count);
@@ -2876,6 +2911,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
 	}
 
 	err = i915_gem_do_execbuffer(dev, file, args, exec2_list, fences);
+	args->buffer_count -= svm_count;
 
 	/*
 	 * Now that we have begun execution of the batchbuffer, we ignore
@@ -2886,7 +2922,6 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
 	if (args->flags & __EXEC_HAS_RELOC) {
 		struct drm_i915_gem_exec_object2 __user *user_exec_list =
 			u64_to_user_ptr(args->buffers_ptr);
-		unsigned int i;
 
 		/* Copy the new buffer offsets back to the user's exec list. */
 		/*
@@ -2900,13 +2935,14 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
 			goto end;
 
 		for (i = 0; i < args->buffer_count; i++) {
-			if (!(exec2_list[i].offset & UPDATE))
+			u64 *offset = &exec2_list_user[i].offset;
+
+			if (!(*offset & UPDATE))
 				continue;
 
-			exec2_list[i].offset =
-				gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
-			unsafe_put_user(exec2_list[i].offset,
-					&user_exec_list[i].offset,
+			*offset = gen8_canonical_addr(*offset &
+						      PIN_OFFSET_MASK);
+			unsafe_put_user(*offset, &user_exec_list[i].offset,
 					end_user);
 		}
 end_user:
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_svm.c b/drivers/gpu/drm/i915/gem/i915_gem_svm.c
new file mode 100644
index 000000000000..882fe56138e2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_svm.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_gem_gtt.h"
+#include "i915_gem_lmem.h"
+
+int i915_gem_vm_bind_svm_obj(struct i915_address_space *vm,
+			     struct drm_i915_gem_vm_bind *args,
+			     struct drm_file *file)
+{
+	struct i915_svm_obj *svm_obj, *tmp;
+	struct drm_i915_gem_object *obj;
+	int ret = 0;
+
+	obj = i915_gem_object_lookup(file, args->handle);
+	if (!obj)
+		return -ENOENT;
+
+	/* For dgfx, ensure the obj is in device local memory only */
+	if (IS_DGFX(vm->i915) && !i915_gem_object_is_lmem(obj)) {
+		ret = -EINVAL;
+		goto put_obj;
+	}
+
+	/* FIXME: Need to handle case with unending batch buffers */
+	if (!(args->flags & I915_GEM_VM_BIND_UNBIND)) {
+		svm_obj = kmalloc(sizeof(*svm_obj), GFP_KERNEL);
+		if (!svm_obj) {
+			ret = -ENOMEM;
+			goto put_obj;
+		}
+		svm_obj->handle = args->handle;
+		svm_obj->offset = args->start;
+	}
+
+	mutex_lock(&vm->mutex);
+	if (!(args->flags & I915_GEM_VM_BIND_UNBIND)) {
+		list_add(&svm_obj->link, &vm->svm_list);
+		vm->svm_count++;
+	} else {
+		/*
+		 * FIXME: Need to handle case where object is migrated/closed
+		 * without unbinding first.
+		 */
+		list_for_each_entry_safe(svm_obj, tmp, &vm->svm_list, link) {
+			if (svm_obj->handle != args->handle)
+				continue;
+
+			list_del_init(&svm_obj->link);
+			vm->svm_count--;
+			kfree(svm_obj);
+			break;
+		}
+	}
+	mutex_unlock(&vm->mutex);
+put_obj:
+	i915_gem_object_put(obj);
+	return ret;
+}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_svm.h b/drivers/gpu/drm/i915/gem/i915_gem_svm.h
new file mode 100644
index 000000000000..d60b35c7d21a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_svm.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_SVM_H
+#define __I915_GEM_SVM_H
+
+#include "i915_drv.h"
+
+#if defined(CONFIG_DRM_I915_SVM)
+int i915_gem_vm_bind_svm_obj(struct i915_address_space *vm,
+			     struct drm_i915_gem_vm_bind *args,
+			     struct drm_file *file);
+#else
+static inline int i915_gem_vm_bind_svm_obj(struct i915_address_space *vm,
+					   struct drm_i915_gem_vm_bind *args,
+					   struct drm_file *file)
+{ return -EOPNOTSUPP; }
+#endif
+
+#endif /* __I915_GEM_SVM_H */
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 2a11f60c4fd2..d452ea8e40b3 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2680,6 +2680,26 @@ i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
 	return -ENODEV;
 }
 
+static int i915_gem_vm_bind_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file)
+{
+	struct drm_i915_gem_vm_bind *args = data;
+	struct i915_address_space *vm;
+	int ret = -EINVAL;
+
+	vm = i915_gem_address_space_lookup(file->driver_priv, args->vm_id);
+	if (unlikely(!vm))
+		return -ENOENT;
+
+	switch (args->type) {
+	case I915_GEM_VM_BIND_SVM_OBJ:
+		ret = i915_gem_vm_bind_svm_obj(vm, args, file);
+	}
+
+	i915_vm_put(vm);
+	return ret;
+}
+
 static const struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
@@ -2739,6 +2759,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_VM_BIND, i915_gem_vm_bind_ioctl, DRM_RENDER_ALLOW),
 };
 
 static struct drm_driver driver = {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ce130e1f1e47..2d0a7cd2dc44 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1909,6 +1909,28 @@ i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
 	return ctx;
 }
 
+static inline struct i915_address_space *
+__i915_gem_address_space_lookup_rcu(struct drm_i915_file_private *file_priv,
+				    u32 id)
+{
+	return idr_find(&file_priv->vm_idr, id);
+}
+
+static inline struct i915_address_space *
+i915_gem_address_space_lookup(struct drm_i915_file_private *file_priv,
+			      u32 id)
+{
+	struct i915_address_space *vm;
+
+	rcu_read_lock();
+	vm = __i915_gem_address_space_lookup_rcu(file_priv, id);
+	if (vm)
+		vm = i915_vm_get(vm);
+	rcu_read_unlock();
+
+	return vm;
+}
+
 /* i915_gem_evict.c */
 int __must_check i915_gem_evict_something(struct i915_address_space *vm,
 					  u64 min_size, u64 alignment,
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index be36719e7987..7d4f5fa84b02 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -586,6 +586,7 @@ static void i915_address_space_init(struct i915_address_space *vm, int subclass)
 	stash_init(&vm->free_pages);
 
 	INIT_LIST_HEAD(&vm->bound_list);
+	INIT_LIST_HEAD(&vm->svm_list);
 }
 
 static int __setup_page_dma(struct i915_address_space *vm,
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 31a4a96ddd0d..7c1b54c9677d 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -285,6 +285,13 @@ struct pagestash {
 	struct pagevec pvec;
 };
 
+struct i915_svm_obj {
+	/** This obj's place in the SVM object list */
+	struct list_head link;
+	u32 handle;
+	u64 offset;
+};
+
 struct i915_address_space {
 	struct kref ref;
 	struct rcu_work rcu;
@@ -329,6 +336,12 @@ struct i915_address_space {
 	 */
 	struct list_head bound_list;
 
+	/**
+	 * List of SVM bind objects.
+	 */
+	struct list_head svm_list;
+	unsigned int svm_count;
+
 	struct pagestash free_pages;
 
 	/* Global GTT */
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 20314eea632a..e10d7bf2cf9f 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -360,6 +360,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_I915_GEM_VM_CREATE		0x3a
 #define DRM_I915_GEM_VM_DESTROY		0x3b
 #define DRM_I915_GEM_OBJECT_SETPARAM	DRM_I915_GEM_CONTEXT_SETPARAM
+#define DRM_I915_GEM_VM_BIND		0x3c
 /* Must be kept compact -- no holes */
 
 #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
@@ -424,6 +425,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_GEM_VM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)
 #define DRM_IOCTL_I915_GEM_VM_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)
 #define DRM_IOCTL_I915_GEM_OBJECT_SETPARAM	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_OBJECT_SETPARAM, struct drm_i915_gem_object_param)
+#define DRM_IOCTL_I915_GEM_VM_BIND		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_BIND, struct drm_i915_gem_vm_bind)
 
 /* Allow drivers to submit batchbuffers directly to hardware, relying
  * on the security mechanisms provided by hardware.
@@ -2300,6 +2302,31 @@ struct drm_i915_query_perf_config {
 	__u8 data[];
 };
 
+/**
+ * struct drm_i915_gem_vm_bind
+ *
+ * Bind an object in a vm's page table.
+ */
+struct drm_i915_gem_vm_bind {
+	/** VA start to bind */
+	__u64 start;
+
+	/** Type of memory to [un]bind */
+	__u32 type;
+#define I915_GEM_VM_BIND_SVM_OBJ      0
+
+	/** Object handle to [un]bind for I915_GEM_VM_BIND_SVM_OBJ type */
+	__u32 handle;
+
+	/** vm to [un]bind */
+	__u32 vm_id;
+
+	/** Flags */
+	__u32 flags;
+#define I915_GEM_VM_BIND_UNBIND      (1 << 0)
+#define I915_GEM_VM_BIND_READONLY    (1 << 1)
+};
+
 #if defined(__cplusplus)
 }
 #endif
-- 
2.21.0.rc0.32.g243a4c7e27


