[PATCH 23/26] drm/i915: Allow vma binding to occur asynchronously

Chris Wilson <chris@chris-wilson.co.uk>
Mon Jun 3 14:46:51 UTC 2019


If we let pages be allocated asynchronously, we then also want to push
the binding process into an asynchronous task. Make it so, utilising the
recent improvements to fence error tracking and struct_mutex reduction.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
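A note for reviewers: the core hand-off here is that each vma now owns a
dma_fence which is installed as the exclusive fence on the object's
reservation object, and the actual set_pages() + bind_vma() work is
pushed to system_unbound_wq once the asynchronously allocated backing
store is ready, with any error propagated through the fence. A rough,
self-contained sketch of that shape (the example_* names below are
placeholders, not the driver code itself):

#include <linux/dma-fence.h>
#include <linux/workqueue.h>

struct example_async_bind {
	struct dma_fence *dma;		/* published as the exclusive fence */
	struct dma_fence_cb cb;		/* hooked onto the page-allocation fence */
	struct work_struct work;
};

static void example_bind_work(struct work_struct *work)
{
	struct example_async_bind *ab = container_of(work, typeof(*ab), work);
	int err;

	/* stand-in for the real vma->ops->set_pages() + ->bind_vma() */
	err = 0;
	if (err)
		dma_fence_set_error(ab->dma, err);

	/* waiters on the reservation object may now proceed */
	dma_fence_signal(ab->dma);
}

static void example_pages_ready(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct example_async_bind *ab = container_of(cb, typeof(*ab), cb);

	/* report a failed allocation instead of attempting the bind */
	if (f->error)
		dma_fence_set_error(ab->dma, f->error);

	INIT_WORK(&ab->work, example_bind_work);
	queue_work(system_unbound_wq, &ab->work);
}

In the patch proper this is queue_async_bind()/do_async_bind(), with the
fence additionally published via reservation_object_add_excl_fence() so
that i915_request_await_object() users serialise against the pending bind.
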
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    |  14 +-
 .../drm/i915/gem/selftests/i915_gem_context.c |   4 +
 drivers/gpu/drm/i915/i915_gem_gtt.c           |  29 ++-
 drivers/gpu/drm/i915/i915_vma.c               | 170 +++++++++++++++---
 drivers/gpu/drm/i915/i915_vma.h               |   9 +
 drivers/gpu/drm/i915/selftests/i915_vma.c     |   4 +-
 6 files changed, 191 insertions(+), 39 deletions(-)
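
The synchronous consumers in i915_gem_gtt.c (ppgtt_set_pages() and
i915_get_ggtt_vma_pages()) both follow the same wait-then-check pattern
against the asynchronously populated obj->mm.pages: block on the
completion, then treat an ERR_PTR()-encoded pages pointer as the
allocator's error code. A minimal illustration of that pattern, using
hypothetical example_* stand-ins for the obj->mm fields:

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

struct example_obj_mm {
	struct completion completion;	/* completed by the allocation worker */
	struct sg_table *pages;		/* valid pages, or ERR_PTR() on failure */
};

static int example_wait_for_pages(struct example_obj_mm *mm)
{
	wait_for_completion(&mm->completion);

	/* a failed allocation parks its error code in the pages pointer */
	if (IS_ERR(mm->pages))
		return PTR_ERR(mm->pages);

	return 0;
}

Request-based paths do not wait directly; they serialise through the
reservation object instead, hence the i915_request_await_object() calls
added before i915_vma_move_to_active() in execbuffer and the selftest.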

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 528eea44dccf..f065a4648c20 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -580,6 +580,15 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
 	u64 pin_flags;
 	int err;
 
+	/*
+	 * If we load the pages asynchronously, then the user *must*
+	 * obey the reservation_object and not bypass waiting on it.
+	 * On the positive side, if the vma is not yet bound (no pages!),
+	 * then it should not have any annoying implicit fences.
+	 */
+	if (exec_flags & EXEC_OBJECT_ASYNC && !vma->pages)
+		*vma->exec_flags &= ~EXEC_OBJECT_ASYNC;
+
 	pin_flags = PIN_USER | PIN_NONBLOCK;
 	if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
 		pin_flags |= PIN_GLOBAL;
@@ -1245,8 +1254,9 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 		goto skip_request;
 
 	i915_vma_lock(batch);
-	GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
-	err = i915_vma_move_to_active(batch, rq, 0);
+	err = i915_request_await_object(rq, batch->obj, false);
+	if (err == 0)
+		err = i915_vma_move_to_active(batch, rq, 0);
 	i915_vma_unlock(batch);
 	if (err)
 		goto skip_request;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 852de3836393..d4e1d6a3c67e 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -295,6 +295,10 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
 		goto err_batch;
 	}
 
+	err = i915_request_await_object(rq, batch->obj, false);
+	if (err)
+		goto err_request;
+
 	flags = 0;
 	if (INTEL_GEN(vm->i915) <= 5)
 		flags |= I915_DISPATCH_SECURE;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index da936db9a3f3..7b779c4364b6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -30,6 +30,7 @@
 #include <linux/random.h>
 #include <linux/seq_file.h>
 #include <linux/stop_machine.h>
+#include <linux/workqueue.h>
 
 #include <asm/set_memory.h>
 
@@ -139,14 +140,14 @@ static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
 
 static int ppgtt_bind_vma(struct i915_vma *vma,
 			  enum i915_cache_level cache_level,
-			  u32 unused)
+			  u32 flags)
 {
+	struct i915_address_space *vm = vma->vm;
 	u32 pte_flags;
 	int err;
 
-	if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
-		err = vma->vm->allocate_va_range(vma->vm,
-						 vma->node.start, vma->size);
+	if (flags & I915_VMA_ALLOC_BIND) {
+		err = vm->allocate_va_range(vm, vma->node.start, vma->size);
 		if (err)
 			return err;
 	}
@@ -156,20 +157,26 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
 	if (i915_gem_object_is_readonly(vma->obj))
 		pte_flags |= PTE_READ_ONLY;
 
-	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+	vm->insert_entries(vm, vma, cache_level, pte_flags);
 
 	return 0;
 }
 
 static void ppgtt_unbind_vma(struct i915_vma *vma)
 {
-	vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
+	struct i915_address_space *vm = vma->vm;
+
+	vm->clear_range(vm, vma->node.start, vma->size);
 }
 
 static int ppgtt_set_pages(struct i915_vma *vma)
 {
 	GEM_BUG_ON(vma->pages);
 
+	wait_for_completion(&vma->obj->mm.completion);
+	if (IS_ERR(vma->obj->mm.pages))
+		return PTR_ERR(vma->obj->mm.pages);
+
 	vma->pages = vma->obj->mm.pages;
 
 	vma->page_sizes = vma->obj->mm.page_sizes;
@@ -2691,6 +2698,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 	 * upgrade to both bound if we bind either to avoid double-binding.
 	 */
 	vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
+	dma_fence_signal(vma->async.dma);
 
 	return 0;
 }
@@ -2720,7 +2728,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 	if (flags & I915_VMA_LOCAL_BIND) {
 		struct i915_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
 
-		if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
+		if (flags & I915_VMA_ALLOC_BIND) {
 			ret = appgtt->vm.allocate_va_range(&appgtt->vm,
 							   vma->node.start,
 							   vma->size);
@@ -3907,13 +3915,18 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
 {
 	int ret;
 
-	/* The vma->pages are only valid within the lifespan of the borrowed
+	/*
+	 * The vma->pages are only valid within the lifespan of the borrowed
 	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
 	 * must be the vma->pages. A simple rule is that vma->pages must only
 	 * be accessed when the obj->mm.pages are pinned.
 	 */
 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
 
+	wait_for_completion(&vma->obj->mm.completion);
+	if (IS_ERR(vma->obj->mm.pages))
+		return PTR_ERR(vma->obj->mm.pages);
+
 	switch (vma->ggtt_view.type) {
 	default:
 		GEM_BUG_ON(vma->ggtt_view.type);
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index ce8fbb965e67..878f0503614c 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -115,6 +115,84 @@ static void __i915_vma_retire(struct i915_active *ref)
 	i915_gem_object_put(obj); /* and drop the active reference */
 }
 
+static int __vma_bind(struct i915_vma *vma,
+		      unsigned int cache_level,
+		      unsigned int flags)
+{
+	int err;
+
+	if (!vma->pages) {
+		err = vma->ops->set_pages(vma);
+		if (err)
+			return err;
+	}
+
+	return vma->ops->bind_vma(vma, cache_level, flags);
+}
+
+static void do_async_bind(struct work_struct *work)
+{
+	struct i915_vma *vma = container_of(work, typeof(*vma), async.work);
+	struct i915_address_space *vm = vma->vm;
+	int err;
+
+	if (vma->async.dma->error)
+		goto out;
+
+	err = __vma_bind(vma, vma->async.cache_level, vma->async.flags);
+	if (err)
+		dma_fence_set_error(vma->async.dma, err);
+
+out:
+	dma_fence_signal(vma->async.dma);
+	i915_vma_put(vma);
+	i915_vm_put(vm);
+}
+
+static void __queue_async_bind(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+	struct i915_vma *vma = container_of(cb, typeof(*vma), async.cb);
+
+	if (f->error)
+		dma_fence_set_error(vma->async.dma, f->error);
+
+	INIT_WORK(&vma->async.work, do_async_bind);
+	queue_work(system_unbound_wq, &vma->async.work);
+}
+
+static const char *async_bind_driver_name(struct dma_fence *fence)
+{
+	return DRIVER_NAME;
+}
+
+static const char *async_bind_timeline_name(struct dma_fence *fence)
+{
+	return "bind";
+}
+
+static const struct dma_fence_ops async_bind_ops = {
+	.get_driver_name = async_bind_driver_name,
+	.get_timeline_name = async_bind_timeline_name,
+};
+
+static DEFINE_SPINLOCK(async_lock);
+
+static struct dma_fence *async_fence_create(struct drm_i915_private *i915)
+{
+	struct dma_fence *f;
+
+	f = kmalloc(sizeof(*f), GFP_KERNEL);
+	if (f) {
+		dma_fence_init(f,
+			       &async_bind_ops,
+			       &async_lock,
+			       i915->mm.unordered_timeline,
+			       0);
+	}
+
+	return f;
+}
+
 static struct i915_vma *
 vma_create(struct drm_i915_gem_object *obj,
 	   struct i915_address_space *vm,
@@ -130,9 +208,6 @@ vma_create(struct drm_i915_gem_object *obj,
 	if (vma == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	i915_active_init(vm->i915, &vma->active, __i915_vma_retire);
-	INIT_ACTIVE_REQUEST(&vma->last_fence);
-
 	vma->vm = vm;
 	vma->ops = &vm->vma_ops;
 	vma->obj = obj;
@@ -142,6 +217,9 @@ vma_create(struct drm_i915_gem_object *obj,
 
 	INIT_LIST_HEAD(&vma->closed_link);
 
+	i915_active_init(vm->i915, &vma->active, __i915_vma_retire);
+	INIT_ACTIVE_REQUEST(&vma->last_fence);
+
 	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
 		vma->ggtt_view = *view;
 		if (view->type == I915_GGTT_VIEW_PARTIAL) {
@@ -304,6 +382,39 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
 	return vma;
 }
 
+static int queue_async_bind(struct i915_vma *vma,
+			    enum i915_cache_level cache_level,
+			    u32 flags)
+{
+	bool ready = true;
+
+	if (!vma->async.dma)
+		return -ENOMEM;
+
+	i915_vma_get(vma);
+	i915_vm_get(vma->vm);
+
+	vma->async.cache_level = cache_level;
+	vma->async.flags = flags;
+
+	i915_vma_lock(vma);
+	if (rcu_access_pointer(vma->resv->fence_excl)) { /* async pages */
+		struct dma_fence *f = reservation_object_get_excl(vma->resv);
+
+		if (!dma_fence_add_callback(f,
+					    &vma->async.cb,
+					    __queue_async_bind))
+			ready = false;
+	}
+	reservation_object_add_excl_fence(vma->resv, vma->async.dma);
+	i915_vma_unlock(vma);
+
+	if (ready)
+		__queue_async_bind(vma->async.dma, &vma->async.cb);
+
+	return 0;
+}
+
 /**
  * i915_vma_bind - Sets up PTEs for an VMA in it's corresponding address space.
  * @vma: VMA to map
@@ -321,17 +432,12 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 	u32 vma_flags;
 	int ret;
 
+	GEM_BUG_ON(!flags);
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 	GEM_BUG_ON(vma->size > vma->node.size);
-
-	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
-					      vma->node.size,
-					      vma->vm->total)))
-		return -ENODEV;
-
-	if (GEM_DEBUG_WARN_ON(!flags))
-		return -EINVAL;
-
+	GEM_BUG_ON(range_overflows(vma->node.start,
+				   vma->node.size,
+				   vma->vm->total));
 	bind_flags = 0;
 	if (flags & PIN_GLOBAL)
 		bind_flags |= I915_VMA_GLOBAL_BIND;
@@ -346,14 +452,22 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 	if (bind_flags == 0)
 		return 0;
 
-	GEM_BUG_ON(!vma->pages);
+	vma->async.dma = async_fence_create(vma->vm->i915);
+	if (!vma->async.dma)
+		return -ENOMEM;
+
+	if ((bind_flags & ~vma_flags) & I915_VMA_LOCAL_BIND)
+		bind_flags |= I915_VMA_ALLOC_BIND;
 
 	trace_i915_vma_bind(vma, bind_flags);
-	ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
+	if (bind_flags & I915_VMA_ALLOC_BIND)
+		ret = queue_async_bind(vma, cache_level, bind_flags);
+	else
+		ret = __vma_bind(vma, cache_level, bind_flags);
 	if (ret)
 		return ret;
 
-	vma->flags |= bind_flags;
+	vma->flags |= bind_flags & ~I915_VMA_ALLOC_BIND;
 	return 0;
 }
 
@@ -597,7 +711,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	}
 
 	if (vma->obj) {
-		ret = i915_gem_object_pin_pages(vma->obj);
+		ret = i915_gem_object_pin_pages_async(vma->obj);
 		if (ret)
 			return ret;
 
@@ -606,25 +720,19 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 		cache_level = 0;
 	}
 
-	GEM_BUG_ON(vma->pages);
-
-	ret = vma->ops->set_pages(vma);
-	if (ret)
-		goto err_unpin;
-
 	if (flags & PIN_OFFSET_FIXED) {
 		u64 offset = flags & PIN_OFFSET_MASK;
 		if (!IS_ALIGNED(offset, alignment) ||
 		    range_overflows(offset, size, end)) {
 			ret = -EINVAL;
-			goto err_clear;
+			goto err_unpin;
 		}
 
 		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
 					   size, offset, cache_level,
 					   flags);
 		if (ret)
-			goto err_clear;
+			goto err_unpin;
 	} else {
 		/*
 		 * We only support huge gtt pages through the 48b PPGTT,
@@ -663,7 +771,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 					  size, alignment, cache_level,
 					  start, end, flags);
 		if (ret)
-			goto err_clear;
+			goto err_unpin;
 
 		GEM_BUG_ON(vma->node.start < start);
 		GEM_BUG_ON(vma->node.start + vma->node.size > end);
@@ -682,8 +790,6 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 
 	return 0;
 
-err_clear:
-	vma->ops->clear_pages(vma);
 err_unpin:
 	if (vma->obj)
 		i915_gem_object_unpin_pages(vma->obj);
@@ -810,6 +916,7 @@ void i915_vma_reopen(struct i915_vma *vma)
 
 static void __i915_vma_destroy(struct i915_vma *vma)
 {
+	GEM_BUG_ON(vma->async.dma);
 	GEM_BUG_ON(vma->node.allocated);
 	GEM_BUG_ON(vma->fence);
 
@@ -968,12 +1075,17 @@ int i915_vma_unbind(struct i915_vma *vma)
 	int ret;
 
 	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+	might_sleep();
+
+	if (vma->flags & I915_VMA_LOCAL_BIND &&
+	    dma_fence_wait_timeout(vma->async.dma, true,
+				   MAX_SCHEDULE_TIMEOUT) < 0)
+		return -EINTR;
 
 	/*
 	 * First wait upon any activity as retiring the request may
 	 * have side-effects such as unpinning or even unbinding this vma.
 	 */
-	might_sleep();
 	if (i915_vma_is_active(vma)) {
 		/*
 		 * When a closed VMA is retired, it is unbound - eek.
@@ -1041,6 +1153,8 @@ int i915_vma_unbind(struct i915_vma *vma)
 	}
 	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
 
+	dma_fence_put(fetch_and_zero(&vma->async.dma));
+
 	i915_vma_remove(vma);
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 07ed81ed898b..47cbb9713d73 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -101,6 +101,7 @@ struct i915_vma {
 #define I915_VMA_GLOBAL_BIND	BIT(9)
 #define I915_VMA_LOCAL_BIND	BIT(10)
 #define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)
+#define I915_VMA_ALLOC_BIND	I915_VMA_PIN_OVERFLOW /* not stored */
 
 #define I915_VMA_GGTT		BIT(11)
 #define I915_VMA_CAN_FENCE	BIT(12)
@@ -142,6 +143,14 @@ struct i915_vma {
 	unsigned int *exec_flags;
 	struct hlist_node exec_node;
 	u32 exec_handle;
+
+	struct i915_vma_async_bind {
+		struct dma_fence *dma;
+		struct dma_fence_cb cb;
+		struct work_struct work;
+		unsigned int cache_level;
+		unsigned int flags;
+	} async;
 };
 
 struct i915_vma *
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index 630669f1c866..a6d97db21ae4 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -204,8 +204,10 @@ static int igt_vma_create(void *arg)
 		mock_context_close(ctx);
 	}
 
-	list_for_each_entry_safe(obj, on, &objects, st_link)
+	list_for_each_entry_safe(obj, on, &objects, st_link) {
+		i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT);
 		i915_gem_object_put(obj);
+	}
 	return err;
 }
 
-- 
2.20.1


