[PATCH 59/62] drm/i915: Update vma to use async page allocations

Chris Wilson <chris@chris-wilson.co.uk>
Mon Jun 22 19:57:46 UTC 2020


Since vma bindings are now performed asynchronously, we are ready to
utilise asynchronous page allocations as well. All we have to do is ask
get_pages not to wait for the pages on our behalf; our bind workqueue
will do the waiting instead.
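
The resulting ordering, condensed from the i915_vma_pin() and
i915_vma_bind() hunks below, looks roughly like this (an illustrative
sketch rather than the literal code: error unwinding is elided, and the
&work->cb / dma_fence_put(prev) details are assumed from the existing
await path rather than shown in this diff):

	/*
	 * i915_vma_pin(): start the page allocation under the object's
	 * dma-resv lock, but do not wait for it here. Recording the bind
	 * work's fence on the object's mm.active timeline replaces the
	 * explicit pin_pages/unpin_pages around the bind work.
	 */
	if (dma_resv_lock_interruptible(vma->resv, NULL))
		return -EINTR;
	err = ____i915_gem_object_get_pages_async(vma->obj);
	if (err == 0)
		err = i915_active_ref(&vma->obj->mm.active,
				      vma->fence_context,
				      &work->base.dma);
	dma_resv_unlock(vma->resv);

	/*
	 * i915_vma_bind(): the bind worker does the waiting. If the vma
	 * has no exclusive fence of its own, order the bind after the
	 * object's exclusive page-allocation fence instead.
	 */
	prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
	if (!prev && vma->obj)
		prev = i915_active_fence_get(&vma->obj->mm.active.excl);
	if (prev) {
		__i915_sw_fence_await_dma_fence(&work->base.chain, prev,
						&work->cb);
		dma_fence_put(prev);
	}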

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    |  2 +
 drivers/gpu/drm/i915/gem/i915_gem_object.h    |  1 +
 drivers/gpu/drm/i915/gem/i915_gem_pages.c     |  2 +-
 drivers/gpu/drm/i915/i915_vma.c               | 42 +++++++++----------
 drivers/gpu/drm/i915/i915_vma_types.h         |  1 +
 5 files changed, 25 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index f122f6e55db5..b9930b306c38 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -699,6 +699,8 @@ static int set_bind_fence(struct i915_vma *vma, struct eb_vm_work *work)
 
 	lockdep_assert_held(&vma->vm->mutex);
 	prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
+	if (!prev)
+		prev = i915_active_fence_get(&vma->obj->mm.active.excl);
 	if (unlikely(prev)) {
 		err = i915_sw_fence_await_dma_fence(&work->base.chain, prev, 0,
 						    GFP_NOWAIT | __GFP_NOWARN);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 03a1b859aeef..3bb0939dce99 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -275,6 +275,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 				 struct sg_table *pages,
 				 unsigned int sg_page_sizes);
 
+int ____i915_gem_object_get_pages_async(struct drm_i915_gem_object *obj);
 int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 int __i915_gem_object_get_pages_locked(struct drm_i915_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index d0cdf1c93a67..4efd1aeedc2d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -95,7 +95,7 @@ static int __i915_gem_object_wait_for_pages(struct drm_i915_gem_object *obj)
 	return 0;
 }
 
-static int ____i915_gem_object_get_pages_async(struct drm_i915_gem_object *obj)
+int ____i915_gem_object_get_pages_async(struct drm_i915_gem_object *obj)
 {
 	int err;
 
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index b34ad6b100e6..0fa8173b6bc7 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -123,6 +123,7 @@ vma_create(struct drm_i915_gem_object *obj,
 	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
 
 	i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire);
+	vma->fence_context = dma_fence_context_alloc(1);
 
 	/* Declare ourselves safe for use inside shrinkers */
 	if (IS_ENABLED(CONFIG_LOCKDEP)) {
@@ -295,7 +296,6 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
 struct i915_vma_work {
 	struct dma_fence_work base;
 	struct i915_vma *vma;
-	struct drm_i915_gem_object *pinned;
 	struct i915_sw_dma_fence_cb cb;
 	enum i915_cache_level cache_level;
 	unsigned int flags;
@@ -339,9 +339,6 @@ static void __vma_release(struct dma_fence_work *work)
 		atomic_or(I915_VMA_ERROR, &vma->flags);
 		atomic_and(~vw->flags, &vma->flags);
 	}
-
-	if (vw->pinned)
-		__i915_gem_object_unpin_pages(vw->pinned);
 }
 
 static const struct dma_fence_work_ops bind_ops = {
@@ -453,6 +450,8 @@ int i915_vma_bind(struct i915_vma *vma,
 	 * execution and not content or object's backing store lifetime.
 	 */
 	prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
+	if (!prev && vma->obj)
+		prev = i915_active_fence_get(&vma->obj->mm.active.excl);
 	if (prev) {
 		__i915_sw_fence_await_dma_fence(&work->base.chain,
 						prev,
@@ -464,11 +463,6 @@ int i915_vma_bind(struct i915_vma *vma,
 		i915_sw_fence_set_error_once(&work->base.chain,
 					     PTR_ERR(vma->obj->mm.pages));
 
-	if (vma->obj) {
-		__i915_gem_object_pin_pages(vma->obj);
-		work->pinned = vma->obj;
-	}
-
 	atomic_or(bind_flags, &vma->flags);
 	return 0;
 }
@@ -837,20 +831,27 @@ int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
 		return 0;
 
-	if (vma->obj) {
-		err = i915_gem_object_pin_pages(vma->obj);
-		if (err)
-			return err;
-	}
-
 	err = __wait_for_unbind(vma, flags);
 	if (err)
-		goto err_pages;
+		return err;
 
 	work = i915_vma_work();
-	if (!work) {
-		err = -ENOMEM;
-		goto err_pages;
+	if (!work)
+		return -ENOMEM;
+
+	if (vma->obj) {
+		if (dma_resv_lock_interruptible(vma->resv, NULL))
+			return -EINTR;
+
+		err = ____i915_gem_object_get_pages_async(vma->obj);
+		if (err == 0) {
+			err = i915_active_ref(&vma->obj->mm.active,
+					      vma->fence_context,
+					      &work->base.dma);
+		}
+		dma_resv_unlock(vma->resv);
+		if (err)
+			return err;
 	}
 
 	if (flags & PIN_GLOBAL)
@@ -945,9 +946,6 @@ int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	dma_fence_work_commit_imm(&work->base);
 	if (wakeref)
 		intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
-err_pages:
-	if (vma->obj)
-		i915_gem_object_unpin_pages(vma->obj);
 	return err;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
index 02c1640bb034..10757319c2a4 100644
--- a/drivers/gpu/drm/i915/i915_vma_types.h
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -250,6 +250,7 @@ struct i915_vma {
 #define I915_VMA_GGTT_WRITE	((int)BIT(I915_VMA_GGTT_WRITE_BIT))
 
 	struct i915_active active;
+	u64 fence_context;
 
 	/**
 	 * Support different GGTT views into the same object.
-- 
2.20.1