[PATCH 120/123] async-vma-bind

Chris Wilson <chris@chris-wilson.co.uk>
Thu Oct 11 20:21:42 UTC 2018
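
Move the ppgtt (I915_VMA_LOCAL_BIND) half of vma binding onto a worker,
fenced by the vma's reservation object. queue_async_bind() waits for
all fences already on vma->resv, then wraps the bind_vma() call in a
dma-fence that is installed as the new exclusive fence, so anything
that awaits the object also waits for the bind to complete. Page-table
allocation and clearing now take vm->mutex, as the deferred bind runs
from a worker outside struct_mutex. Errors from the deferred bind are
stashed in vma->error and reported on the next
i915_vma_move_to_active(), and i915_vma_unbind() waits for the async
bind to finish before tearing down the mapping. The execbuffer
relocation path and the gpu_fill selftest gain an
i915_request_await_object() on the batch so they are ordered behind
its asynchronous bind.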


---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c    |   5 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c           |  24 ++-
 drivers/gpu/drm/i915/i915_vma.c               | 158 ++++++++++++++++--
 drivers/gpu/drm/i915/i915_vma.h               |   5 +
 .../gpu/drm/i915/selftests/i915_gem_context.c |   4 +
 drivers/gpu/drm/i915/selftests/i915_vma.c     |   6 +-
 6 files changed, 182 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 32a3c8cc1de9..9ebe48464733 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1184,6 +1184,10 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 		goto err_unpin;
 	}
 
+	err = i915_request_await_object(rq, batch->obj, false);
+	if (err)
+		goto err_request;
+
 	err = i915_request_await_object(rq, vma->obj, true);
 	if (err)
 		goto err_request;
@@ -1194,7 +1198,6 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 	if (err)
 		goto err_request;
 
-	GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
 	err = i915_vma_move_to_active(batch, rq, 0);
 	if (err)
 		goto skip_request;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 99e54ae4f2c3..57822ed01c28 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -136,14 +136,16 @@ static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
 
 static int ppgtt_bind_vma(struct i915_vma *vma,
 			  enum i915_cache_level cache_level,
-			  u32 unused)
+			  u32 flags)
 {
+	struct i915_address_space *vm = vma->vm;
 	u32 pte_flags;
 	int err;
 
-	if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
-		err = vma->vm->allocate_va_range(vma->vm,
-						 vma->node.start, vma->size);
+	if (flags & I915_VMA_ALLOC_BIND) {
+		mutex_lock(&vm->mutex);
+		err = vm->allocate_va_range(vm, vma->node.start, vma->size);
+		mutex_unlock(&vm->mutex);
 		if (err)
 			return err;
 	}
@@ -153,14 +155,18 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
 	if (i915_gem_object_is_readonly(vma->obj))
 		pte_flags |= PTE_READ_ONLY;
 
-	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+	vm->insert_entries(vm, vma, cache_level, pte_flags);
 
 	return 0;
 }
 
 static void ppgtt_unbind_vma(struct i915_vma *vma)
 {
-	vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
+	struct i915_address_space *vm = vma->vm;
+
+	mutex_lock(&vm->mutex);
+	vm->clear_range(vm, vma->node.start, vma->size);
+	mutex_unlock(&vm->mutex);
 }
 
 static int ppgtt_set_pages(struct i915_vma *vma)
@@ -2555,10 +2561,12 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 	if (flags & I915_VMA_LOCAL_BIND) {
 		struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
 
-		if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
+		if (flags & I915_VMA_ALLOC_BIND) {
+			mutex_lock(&appgtt->vm.mutex);
 			ret = appgtt->vm.allocate_va_range(&appgtt->vm,
 							   vma->node.start,
 							   vma->size);
+			mutex_unlock(&appgtt->vm.mutex);
 			if (ret)
 				return ret;
 		}
@@ -2594,7 +2602,9 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
 	if (vma->flags & I915_VMA_LOCAL_BIND) {
 		struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->vm;
 
+		mutex_lock(&vm->mutex);
 		vm->clear_range(vm, vma->node.start, vma->size);
+		mutex_unlock(&vm->mutex);
 	}
 }
 
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index e7e84bb8134c..126fe54dca24 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -303,6 +303,125 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
 	return vma;
 }
 
+struct async_bind {
+	struct dma_fence dma;
+	struct work_struct work;
+	struct i915_sw_fence wait;
+	struct i915_vma *vma;
+	enum i915_cache_level cache_level;
+	u32 flags;
+};
+
+static void do_async_bind(struct work_struct *work)
+{
+	struct async_bind *ab = container_of(work, typeof(*ab), work);
+	struct i915_vma *vma = ab->vma;
+	int err;
+
+	err = ab->wait.error;
+	if (!err)
+		err = vma->ops->bind_vma(vma, ab->cache_level, ab->flags);
+	if (err) {
+		smp_store_mb(vma->error, err);
+		dma_fence_set_error(&ab->dma, err);
+	}
+
+	complete_all(&vma->completion);
+	i915_vma_put(vma);
+
+	dma_fence_signal(&ab->dma);
+	dma_fence_put(&ab->dma);
+}
+
+static int __i915_sw_fence_call
+async_bind_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+	struct async_bind *ab = container_of(fence, typeof(*ab), wait);
+
+	switch (state) {
+	case FENCE_COMPLETE:
+		queue_work(system_unbound_wq, &ab->work);
+		break;
+
+	case FENCE_FREE:
+		dma_fence_put(&ab->dma);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static const char *async_bind_driver_name(struct dma_fence *fence)
+{
+	return DRIVER_NAME;
+}
+
+static const char *async_bind_timeline_name(struct dma_fence *fence)
+{
+	return "bind";
+}
+
+static void async_bind_release(struct dma_fence *fence)
+{
+	struct async_bind *ab = container_of(fence, typeof(*ab), dma);
+
+	i915_sw_fence_fini(&ab->wait);
+
+	BUILD_BUG_ON(offsetof(typeof(*ab), dma));
+	dma_fence_free(&ab->dma);
+}
+
+static const struct dma_fence_ops async_bind_ops = {
+	.get_driver_name = async_bind_driver_name,
+	.get_timeline_name = async_bind_timeline_name,
+	.release = async_bind_release,
+};
+
+static DEFINE_SPINLOCK(async_lock);
+
+static int queue_async_bind(struct i915_vma *vma,
+			    enum i915_cache_level cache_level,
+			    u32 flags)
+{
+	struct async_bind *ab;
+
+	ab = kmalloc(sizeof(*ab), GFP_KERNEL);
+	if (!ab)
+		return -ENOMEM;
+
+	INIT_WORK(&ab->work, do_async_bind);
+
+	dma_fence_init(&ab->dma,
+		       &async_bind_ops,
+		       &async_lock,
+		       vma->vm->i915->mm.unordered_timeline,
+		       0);
+	i915_sw_fence_init(&ab->wait, async_bind_notify);
+
+	ab->vma = i915_vma_get(vma);
+	ab->cache_level = cache_level;
+	ab->flags = flags;
+
+	dma_fence_get(&ab->dma);
+	if (i915_sw_fence_await_reservation(&ab->wait,
+					    vma->resv, NULL,
+					    true, I915_FENCE_TIMEOUT,
+					    I915_FENCE_GFP) < 0) {
+		i915_vma_put(vma);
+		async_bind_release(&ab->dma);
+		return -ENOMEM;
+	}
+
+	reservation_object_lock(vma->resv, NULL);
+	reservation_object_add_excl_fence(vma->resv, &ab->dma);
+	reservation_object_unlock(vma->resv);
+
+	init_completion(&vma->completion);
+	i915_sw_fence_commit(&ab->wait);
+
+	return 0;
+}
+
 /**
  * i915_vma_bind - Sets up PTEs for an VMA in it's corresponding address space.
  * @vma: VMA to map
@@ -320,17 +439,12 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 	u32 vma_flags;
 	int ret;
 
+	GEM_BUG_ON(!flags);
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 	GEM_BUG_ON(vma->size > vma->node.size);
-
-	if (GEM_WARN_ON(range_overflows(vma->node.start,
-					vma->node.size,
-					vma->vm->total)))
-		return -ENODEV;
-
-	if (GEM_WARN_ON(!flags))
-		return -EINVAL;
-
+	GEM_BUG_ON(range_overflows(vma->node.start,
+				   vma->node.size,
+				   vma->vm->total));
 	bind_flags = 0;
 	if (flags & PIN_GLOBAL)
 		bind_flags |= I915_VMA_GLOBAL_BIND;
@@ -347,12 +461,18 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 
 	GEM_BUG_ON(!vma->pages);
 
+	if (!(flags & PIN_UPDATE))
+		bind_flags |= I915_VMA_ALLOC_BIND;
+
 	trace_i915_vma_bind(vma, bind_flags);
-	ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
+	if (bind_flags & I915_VMA_LOCAL_BIND)
+		ret = queue_async_bind(vma, cache_level, bind_flags);
+	else
+		ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
 	if (ret)
 		return ret;
 
-	vma->flags |= bind_flags;
+	vma->flags |= bind_flags & ~I915_VMA_ALLOC_BIND;
 	return 0;
 }
 
@@ -1026,10 +1146,20 @@ int i915_vma_move_to_active(struct i915_vma *vma,
 {
 	struct drm_i915_gem_object *obj = vma->obj;
 	struct i915_gem_active *active;
+	int err;
 
 	lockdep_assert_held(&rq->i915->drm.struct_mutex);
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 
+	/*
+	 * The error should be set before the async chain is decoupled. If
+	 * the async bind is still pending, then we shall report the error
+	 * by propagating it along the chain of fences.
+	 */
+	err = READ_ONCE(vma->error);
+	if (err)
+		return err;
+
 	active = active_instance(vma, rq->fence.context);
 	if (IS_ERR(active))
 		return PTR_ERR(active);
@@ -1125,6 +1255,12 @@ int i915_vma_unbind(struct i915_vma *vma)
 	if (!drm_mm_node_allocated(&vma->node))
 		return 0;
 
+	if (vma->flags & I915_VMA_LOCAL_BIND) {
+		ret = wait_for_completion_interruptible(&vma->completion);
+		if (ret)
+			return ret;
+	}
+
 	if (i915_vma_is_map_and_fenceable(vma)) {
 		/*
 		 * Check that we have flushed all writes through the GGTT
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 7b60d5712d31..0ef4a76766a0 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -25,6 +25,7 @@
 #ifndef __I915_VMA_H__
 #define __I915_VMA_H__
 
+#include <linux/completion.h>
 #include <linux/io-mapping.h>
 #include <linux/rbtree.h>
 
@@ -87,6 +88,7 @@ struct i915_vma {
 #define I915_VMA_GLOBAL_BIND	BIT(6)
 #define I915_VMA_LOCAL_BIND	BIT(7)
 #define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)
+#define I915_VMA_ALLOC_BIND	BIT(8) /* not stored, only for bind_vma() */
 
 #define I915_VMA_GGTT		BIT(8)
 #define I915_VMA_CAN_FENCE	BIT(9)
@@ -95,6 +97,9 @@ struct i915_vma {
 #define I915_VMA_USERFAULT	BIT(I915_VMA_USERFAULT_BIT)
 #define I915_VMA_GGTT_WRITE	BIT(12)
 
+	struct completion completion;
+	int error;
+
 	unsigned int active_count;
 	struct rb_root active;
 	struct i915_gem_active last_active;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 946363c682a6..39819e6bbfdc 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -356,6 +356,10 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
 		goto err_batch;
 	}
 
+	err = i915_request_await_object(rq, batch->obj, false);
+	if (err)
+		goto err_request;
+
 	flags = 0;
 	if (INTEL_GEN(vm->i915) <= 5)
 		flags |= I915_DISPATCH_SECURE;
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index 741f366fb8e4..278a746e3396 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -200,8 +200,12 @@ static int igt_vma_create(void *arg)
 		mock_context_close(ctx);
 	}
 
-	list_for_each_entry_safe(obj, on, &objects, st_link)
+	list_for_each_entry_safe(obj, on, &objects, st_link) {
+		i915_gem_object_wait(obj,
+				     I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT,
+				     NULL);
 		i915_gem_object_put(obj);
+	}
 	return err;
 }
 
-- 
2.19.1


