[Intel-gfx] [PATCH 27/28] drm/i915: Allow vma binding to occur asynchronously

Chris Wilson chris at chris-wilson.co.uk
Mon Jun 10 07:21:25 UTC 2019


If we allow pages to be allocated asynchronously, we then also want to
push the binding process into an asynchronous task. Make it so,
utilising the recent improvements to fence error tracking and
struct_mutex reduction.

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    |  16 +-
 .../drm/i915/gem/selftests/i915_gem_context.c |   4 +
 drivers/gpu/drm/i915/i915_gem_gtt.c           |  28 ++-
 drivers/gpu/drm/i915/i915_vma.c               | 160 +++++++++++++++---
 drivers/gpu/drm/i915/i915_vma.h               |  14 ++
 drivers/gpu/drm/i915/selftests/i915_vma.c     |   4 +-
 6 files changed, 189 insertions(+), 37 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index eeb4c6cdb01d..ccb089420830 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -580,6 +580,15 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
 	u64 pin_flags;
 	int err;
 
+	/*
+	 * If we load the pages asynchronously, then the user *must*
+	 * obey the reservation_object and not bypass waiting on it.
+	 * On the positive side, if the vma is not yet bound (no pages!),
+	 * then it should not have any annoying implicit fences.
+	 */
+	if (exec_flags & EXEC_OBJECT_ASYNC && !vma->pages)
+		*vma->exec_flags &= ~EXEC_OBJECT_ASYNC;
+
 	pin_flags = PIN_USER | PIN_NONBLOCK;
 	if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
 		pin_flags |= PIN_GLOBAL;
@@ -1187,7 +1196,7 @@ static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
 	obj->write_domain = 0;
 
 	err = i915_request_await_object(rq, vma->obj, true);
-	if (err == 0)
+	if (!err)
 		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 
 	i915_vma_unlock(vma);
@@ -1245,8 +1254,9 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 		goto skip_request;
 
 	i915_vma_lock(batch);
-	GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
-	err = i915_vma_move_to_active(batch, rq, 0);
+	err = i915_request_await_object(rq, batch->obj, false);
+	if (err == 0)
+		err = i915_vma_move_to_active(batch, rq, 0);
 	i915_vma_unlock(batch);
 	if (err)
 		goto skip_request;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 74b0e5871c4b..e8a438d0b8a2 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -295,6 +295,10 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
 		goto err_batch;
 	}
 
+	err = i915_request_await_object(rq, batch->obj, false);
+	if (err)
+		goto err_request;
+
 	flags = 0;
 	if (INTEL_GEN(vm->i915) <= 5)
 		flags |= I915_DISPATCH_SECURE;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 1a157bd43c2c..bfca8a4a88e2 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -30,6 +30,7 @@
 #include <linux/random.h>
 #include <linux/seq_file.h>
 #include <linux/stop_machine.h>
+#include <linux/workqueue.h>
 
 #include <asm/set_memory.h>
 
@@ -139,14 +140,14 @@ static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
 
 static int ppgtt_bind_vma(struct i915_vma *vma,
 			  enum i915_cache_level cache_level,
-			  u32 unused)
+			  u32 flags)
 {
+	struct i915_address_space *vm = vma->vm;
 	u32 pte_flags;
 	int err;
 
-	if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
-		err = vma->vm->allocate_va_range(vma->vm,
-						 vma->node.start, vma->size);
+	if (flags & I915_VMA_ALLOC_BIND) {
+		err = vm->allocate_va_range(vm, vma->node.start, vma->size);
 		if (err)
 			return err;
 	}
@@ -156,20 +157,26 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
 	if (i915_gem_object_is_readonly(vma->obj))
 		pte_flags |= PTE_READ_ONLY;
 
-	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+	vm->insert_entries(vm, vma, cache_level, pte_flags);
 
 	return 0;
 }
 
 static void ppgtt_unbind_vma(struct i915_vma *vma)
 {
-	vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
+	struct i915_address_space *vm = vma->vm;
+
+	vm->clear_range(vm, vma->node.start, vma->size);
 }
 
 static int ppgtt_set_pages(struct i915_vma *vma)
 {
 	GEM_BUG_ON(vma->pages);
 
+	wait_for_completion(&vma->obj->mm.completion);
+	if (IS_ERR(vma->obj->mm.pages))
+		return PTR_ERR(vma->obj->mm.pages);
+
 	vma->pages = vma->obj->mm.pages;
 
 	vma->page_sizes = vma->obj->mm.page_sizes;
@@ -2707,7 +2714,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 	if (flags & I915_VMA_LOCAL_BIND) {
 		struct i915_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
 
-		if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
+		if (flags & I915_VMA_ALLOC_BIND) {
 			ret = appgtt->vm.allocate_va_range(&appgtt->vm,
 							   vma->node.start,
 							   vma->size);
@@ -3896,13 +3903,18 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
 {
 	int ret;
 
-	/* The vma->pages are only valid within the lifespan of the borrowed
+	/*
+	 * The vma->pages are only valid within the lifespan of the borrowed
 	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
 	 * must be the vma->pages. A simple rule is that vma->pages must only
 	 * be accessed when the obj->mm.pages are pinned.
 	 */
 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
 
+	wait_for_completion(&vma->obj->mm.completion);
+	if (IS_ERR(vma->obj->mm.pages))
+		return PTR_ERR(vma->obj->mm.pages);
+
 	switch (vma->ggtt_view.type) {
 	default:
 		GEM_BUG_ON(vma->ggtt_view.type);
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 2d7af763b928..89079630d0af 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -92,6 +92,70 @@ static void __i915_vma_retire(struct i915_active *ref)
 	i915_vma_put(vma);
 }
 
+static int __vma_bind(struct i915_vma *vma,
+		      unsigned int cache_level,
+		      unsigned int flags)
+{
+	int err;
+
+	if (!vma->pages) {
+		err = vma->ops->set_pages(vma);
+		if (err)
+			return err;
+	}
+
+	return vma->ops->bind_vma(vma, cache_level, flags);
+}
+
+static void do_async_bind(struct work_struct *work)
+{
+	struct i915_vma *vma = container_of(work, typeof(*vma), async.work);
+	struct i915_address_space *vm = vma->vm;
+	int err;
+
+	if (vma->async.dma->error)
+		goto out;
+
+	err = __vma_bind(vma, vma->async.cache_level, vma->async.flags);
+	if (err)
+		dma_fence_set_error(vma->async.dma, err);
+
+out:
+	dma_fence_signal(vma->async.dma);
+
+	i915_vma_unpin(vma);
+	i915_vma_put(vma);
+	i915_vm_put(vm);
+}
+
+static void __queue_async_bind(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+	struct i915_vma *vma = container_of(cb, typeof(*vma), async.cb);
+
+	if (f->error)
+		dma_fence_set_error(vma->async.dma, f->error);
+
+	INIT_WORK(&vma->async.work, do_async_bind);
+	queue_work(system_unbound_wq, &vma->async.work);
+}
+
+static const char *async_bind_driver_name(struct dma_fence *fence)
+{
+	return DRIVER_NAME;
+}
+
+static const char *async_bind_timeline_name(struct dma_fence *fence)
+{
+	return "bind";
+}
+
+static const struct dma_fence_ops async_bind_ops = {
+	.get_driver_name = async_bind_driver_name,
+	.get_timeline_name = async_bind_timeline_name,
+};
+
+static DEFINE_SPINLOCK(async_lock);
+
 static struct i915_vma *
 vma_create(struct drm_i915_gem_object *obj,
 	   struct i915_address_space *vm,
@@ -276,6 +340,54 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
 	return vma;
 }
 
+static int queue_async_bind(struct i915_vma *vma,
+			    enum i915_cache_level cache_level,
+			    u32 flags)
+{
+	bool ready = true;
+
+	/* We are not allowed to shrink inside vm->mutex! */
+	vma->async.dma = kmalloc(sizeof(*vma->async.dma),
+				 GFP_NOWAIT | __GFP_NOWARN);
+	if (!vma->async.dma)
+		return -ENOMEM;
+
+	dma_fence_init(vma->async.dma,
+		       &async_bind_ops,
+		       &async_lock,
+		       vma->vm->i915->mm.unordered_timeline,
+		       0);
+
+	/* XXX find and avoid allocations under reservation_object locks */
+	if (!i915_vma_trylock(vma)) {
+		kfree(fetch_and_zero(&vma->async.dma));
+		return -EAGAIN;
+	}
+
+	if (rcu_access_pointer(vma->resv->fence_excl)) { /* async pages */
+		struct dma_fence *f = reservation_object_get_excl(vma->resv);
+
+		if (!dma_fence_add_callback(f,
+					    &vma->async.cb,
+					    __queue_async_bind))
+			ready = false;
+	}
+	reservation_object_add_excl_fence(vma->resv, vma->async.dma);
+	i915_vma_unlock(vma);
+
+	i915_vm_get(vma->vm);
+	i915_vma_get(vma);
+	__i915_vma_pin(vma); /* avoid being shrunk */
+
+	vma->async.cache_level = cache_level;
+	vma->async.flags = flags;
+
+	if (ready)
+		__queue_async_bind(vma->async.dma, &vma->async.cb);
+
+	return 0;
+}
+
 /**
  * i915_vma_bind - Sets up PTEs for an VMA in it's corresponding address space.
  * @vma: VMA to map
@@ -293,17 +405,12 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 	u32 vma_flags;
 	int ret;
 
+	GEM_BUG_ON(!flags);
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 	GEM_BUG_ON(vma->size > vma->node.size);
-
-	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
-					      vma->node.size,
-					      vma->vm->total)))
-		return -ENODEV;
-
-	if (GEM_DEBUG_WARN_ON(!flags))
-		return -EINVAL;
-
+	GEM_BUG_ON(range_overflows(vma->node.start,
+				   vma->node.size,
+				   vma->vm->total));
 	bind_flags = 0;
 	if (flags & PIN_GLOBAL)
 		bind_flags |= I915_VMA_GLOBAL_BIND;
@@ -318,14 +425,18 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 	if (bind_flags == 0)
 		return 0;
 
-	GEM_BUG_ON(!vma->pages);
+	if ((bind_flags & ~vma_flags) & I915_VMA_LOCAL_BIND)
+		bind_flags |= I915_VMA_ALLOC_BIND;
 
 	trace_i915_vma_bind(vma, bind_flags);
-	ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
+	if (bind_flags & I915_VMA_ALLOC_BIND)
+		ret = queue_async_bind(vma, cache_level, bind_flags);
+	else
+		ret = __vma_bind(vma, cache_level, bind_flags);
 	if (ret)
 		return ret;
 
-	vma->flags |= bind_flags;
+	vma->flags |= bind_flags & ~I915_VMA_ALLOC_BIND;
 	return 0;
 }
 
@@ -569,7 +680,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	}
 
 	if (vma->obj) {
-		ret = i915_gem_object_pin_pages(vma->obj);
+		ret = i915_gem_object_pin_pages_async(vma->obj);
 		if (ret)
 			return ret;
 
@@ -578,25 +689,19 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 		cache_level = 0;
 	}
 
-	GEM_BUG_ON(vma->pages);
-
-	ret = vma->ops->set_pages(vma);
-	if (ret)
-		goto err_unpin;
-
 	if (flags & PIN_OFFSET_FIXED) {
 		u64 offset = flags & PIN_OFFSET_MASK;
 		if (!IS_ALIGNED(offset, alignment) ||
 		    range_overflows(offset, size, end)) {
 			ret = -EINVAL;
-			goto err_clear;
+			goto err_unpin;
 		}
 
 		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
 					   size, offset, cache_level,
 					   flags);
 		if (ret)
-			goto err_clear;
+			goto err_unpin;
 	} else {
 		/*
 		 * We only support huge gtt pages through the 48b PPGTT,
@@ -635,7 +740,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 					  size, alignment, cache_level,
 					  start, end, flags);
 		if (ret)
-			goto err_clear;
+			goto err_unpin;
 
 		GEM_BUG_ON(vma->node.start < start);
 		GEM_BUG_ON(vma->node.start + vma->node.size > end);
@@ -654,8 +759,6 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 
 	return 0;
 
-err_clear:
-	vma->ops->clear_pages(vma);
 err_unpin:
 	if (vma->obj)
 		i915_gem_object_unpin_pages(vma->obj);
@@ -790,7 +893,7 @@ static void __i915_vma_destroy(struct i915_vma *vma)
 
 		spin_lock(&obj->vma.lock);
 		list_del(&vma->obj_link);
-		rb_erase(&vma->obj_node, &vma->obj->vma.tree);
+		rb_erase(&vma->obj_node, &obj->vma.tree);
 		spin_unlock(&obj->vma.lock);
 	}
 
@@ -930,6 +1033,11 @@ int i915_vma_unbind(struct i915_vma *vma)
 
 	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
 
+	if (vma->async.dma &&
+	    dma_fence_wait_timeout(vma->async.dma, true,
+				   MAX_SCHEDULE_TIMEOUT) < 0)
+		return -EINTR;
+
 	ret = i915_active_wait(&vma->active);
 	if (ret)
 		return ret;
@@ -975,6 +1083,8 @@ int i915_vma_unbind(struct i915_vma *vma)
 	}
 	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
 
+	dma_fence_put(fetch_and_zero(&vma->async.dma));
+
 	i915_vma_remove(vma);
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 71088ff4ad59..67e43f5d01f6 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -103,6 +103,7 @@ struct i915_vma {
 #define I915_VMA_GLOBAL_BIND	BIT(9)
 #define I915_VMA_LOCAL_BIND	BIT(10)
 #define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)
+#define I915_VMA_ALLOC_BIND	I915_VMA_PIN_OVERFLOW /* not stored */
 
 #define I915_VMA_GGTT		BIT(11)
 #define I915_VMA_CAN_FENCE	BIT(12)
@@ -143,6 +144,14 @@ struct i915_vma {
 	unsigned int *exec_flags;
 	struct hlist_node exec_node;
 	u32 exec_handle;
+
+	struct i915_vma_async_bind {
+		struct dma_fence *dma;
+		struct dma_fence_cb cb;
+		struct work_struct work;
+		unsigned int cache_level;
+		unsigned int flags;
+	} async;
 };
 
 struct i915_vma *
@@ -305,6 +314,11 @@ static inline void i915_vma_lock(struct i915_vma *vma)
 	reservation_object_lock(vma->resv, NULL);
 }
 
+static inline bool i915_vma_trylock(struct i915_vma *vma)
+{
+	return reservation_object_trylock(vma->resv);
+}
+
 static inline void i915_vma_unlock(struct i915_vma *vma)
 {
 	reservation_object_unlock(vma->resv);
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index a166d9405a94..615ac485c731 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -204,8 +204,10 @@ static int igt_vma_create(void *arg)
 		mock_context_close(ctx);
 	}
 
-	list_for_each_entry_safe(obj, on, &objects, st_link)
+	list_for_each_entry_safe(obj, on, &objects, st_link) {
+		i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT);
 		i915_gem_object_put(obj);
+	}
 	return err;
 }
 
-- 
2.20.1



More information about the Intel-gfx mailing list