[PATCH 22/22] drm/i915/gem: Pull execbuf dma resv under a single critical section

Chris Wilson <chris@chris-wilson.co.uk>
Wed Jul 1 19:36:03 UTC 2020


Acquire all the objects, their backing storage and the page directories
used by execbuf under a single common ww_mutex acquire context. We do,
however, have to restart the critical section a few times in order to
handle various restrictions (such as avoiding copy_(from|to)_user and
mmap_sem while the reservations are held).

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    | 141 ++++++++----------
 1 file changed, 60 insertions(+), 81 deletions(-)

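For reference, the new eb_reserve_mm() loop follows the standard ww_mutex
acquire/backoff pattern. Below is a minimal, self-contained sketch of that
pattern; the demo_obj/demo_ww_class/demo_lock_all names are illustrative
only, and the i915_acquire_ctx helpers (introduced earlier in this series)
are presumed to wrap the same dance around the objects' dma-resv locks.

#include <linux/list.h>
#include <linux/ww_mutex.h>

DEFINE_WW_CLASS(demo_ww_class);

struct demo_obj {
	struct ww_mutex lock;	/* stands in for the object's dma-resv lock */
	struct list_head link;	/* stands in for eb_vma.bind_link */
};

/*
 * Lock every object on the list under one acquire context. The caller has
 * already done ww_acquire_init(ctx, &demo_ww_class) and has initialised
 * each lock with ww_mutex_init(&obj->lock, &demo_ww_class).
 */
static int demo_lock_all(struct list_head *objects, struct ww_acquire_ctx *ctx)
{
	struct demo_obj *obj, *contended = NULL, *hold = NULL;
	int err;

retry:
	list_for_each_entry(obj, objects, link) {
		if (obj == hold) {
			/* Already taken via the slow path below. */
			hold = NULL;
			continue;
		}

		err = ww_mutex_lock_interruptible(&obj->lock, ctx);
		if (err) {
			contended = obj;
			goto unwind;
		}
	}

	ww_acquire_done(ctx); /* no further locks will be added to ctx */
	return 0;

unwind:
	/* Drop every lock taken earlier in this pass... */
	list_for_each_entry_continue_reverse(obj, objects, link)
		ww_mutex_unlock(&obj->lock);
	/* ...and the slow-locked winner if we had not reached it yet. */
	if (hold)
		ww_mutex_unlock(&hold->lock);

	if (err == -EDEADLK) {
		/* Sleep on the contended lock, then restart the whole pass. */
		err = ww_mutex_lock_slow_interruptible(&contended->lock, ctx);
		if (!err) {
			hold = contended;
			goto retry;
		}
	}

	ww_acquire_fini(ctx);
	return err;
}

On -EDEADLK everything already held is dropped, the contended lock is taken
with the _slow variant and the whole pass restarts; eb_reserve_mm() keeps
eb->bind_list itself as the retry order by rotating the entries it had
already acquired to the tail before calling i915_acquire_ctx_backoff().
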
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 4ab6a079c0f0..f5757c3dc874 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -20,6 +20,7 @@
 #include "gt/intel_gt_pm.h"
 #include "gt/intel_gt_requests.h"
 #include "gt/intel_ring.h"
+#include "mm/i915_acquire_ctx.h"
 
 #include "i915_drv.h"
 #include "i915_gem_clflush.h"
@@ -242,6 +243,7 @@ struct i915_execbuffer {
 	struct intel_context *context; /* logical state for the request */
 	struct i915_gem_context *gem_context; /** caller's context */
 
+	struct i915_acquire_ctx acquire; /** lock for all DMA reservations */
 	struct i915_request *request; /** our request to build */
 	struct eb_vma *batch; /** identity of the batch obj/vma */
 
@@ -377,42 +379,6 @@ static void eb_vma_array_put(struct eb_vma_array *arr)
 	kref_put(&arr->kref, eb_vma_array_destroy);
 }
 
-static int
-eb_lock_vma(struct i915_execbuffer *eb, struct ww_acquire_ctx *acquire)
-{
-	struct eb_vma *ev;
-	int err = 0;
-
-	list_for_each_entry(ev, &eb->submit_list, submit_link) {
-		struct i915_vma *vma = ev->vma;
-
-		err = ww_mutex_lock_interruptible(&vma->resv->lock, acquire);
-		if (err == -EDEADLK) {
-			struct eb_vma *unlock = ev, *en;
-
-			list_for_each_entry_safe_continue_reverse(unlock, en,
-								  &eb->submit_list,
-								  submit_link) {
-				ww_mutex_unlock(&unlock->vma->resv->lock);
-				list_move_tail(&unlock->submit_link, &eb->submit_list);
-			}
-
-			GEM_BUG_ON(!list_is_first(&ev->submit_link, &eb->submit_list));
-			err = ww_mutex_lock_slow_interruptible(&vma->resv->lock,
-							       acquire);
-		}
-		if (err) {
-			list_for_each_entry_continue_reverse(ev,
-							     &eb->submit_list,
-							     submit_link)
-				ww_mutex_unlock(&ev->vma->resv->lock);
-			break;
-		}
-	}
-
-	return err;
-}
-
 static int eb_create(struct i915_execbuffer *eb)
 {
 	/* Allocate an extra slot for use by the sentinel */
@@ -648,6 +614,31 @@ eb_add_vma(struct i915_execbuffer *eb,
 	}
 }
 
+static int eb_reserve_mm(struct i915_execbuffer *eb)
+{
+	struct eb_vma *ev;
+	int err;
+
+	list_for_each_entry(ev, &eb->bind_list, bind_link) {
+		err = i915_acquire_ctx_lock(&eb->acquire, ev->vma->obj);
+		if (err == -EDEADLK) {
+			struct eb_vma *unlock = ev, *en;
+
+			list_for_each_entry_safe_continue_reverse(unlock, en,
+								  &eb->bind_list,
+								  bind_link)
+				list_move_tail(&unlock->bind_link,
+					       &eb->bind_list);
+
+			err = i915_acquire_ctx_backoff(&eb->acquire);
+		}
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
 struct eb_vm_work {
 	struct dma_fence_work base;
 	struct list_head unbound;
@@ -1304,10 +1295,13 @@ static int eb_prepare_vma(struct eb_vma *ev, struct eb_vm_work *work)
 	if (ev->flags & __EXEC_OBJECT_NEEDS_MAP)
 		max_size = max_t(u64, max_size, vma->fence_size);
 
+	/* XXX pass eb->acquire to pt_stash for its DMA resv */
 	err = i915_vm_alloc_pt_stash(work->vm, &work->stash, max_size);
+	GEM_BUG_ON(err == -EDEADLK); /* all fresh, no contention */
 	if (err)
 		return err;
 
+	/* XXX just setup vma->pages, holding obj->pages under ww_mutex */
 	return i915_vma_get_pages(ev->vma);
 }
 
@@ -1384,7 +1378,11 @@ static int eb_reserve_vm(struct i915_execbuffer *eb)
 	struct list_head last, unbound;
 	unsigned int pass;
 	struct eb_vma *ev;
-	int err = 0;
+	int err;
+
+	err = eb_reserve_mm(eb);
+	if (err)
+		return err;
 
 	INIT_LIST_HEAD(&unbound);
 	list_for_each_entry(ev, &eb->bind_list, bind_link) {
@@ -1502,6 +1500,9 @@ static int eb_reserve_vm(struct i915_execbuffer *eb)
 		}
 		list_splice_tail(&last, &unbound);
 
+		/* XXX Not needed if we allow userptr from fence */
+		i915_acquire_ctx_fini(&eb->acquire);
+
 		if (signal_pending(current))
 			return -EINTR;
 
@@ -1513,6 +1514,9 @@ static int eb_reserve_vm(struct i915_execbuffer *eb)
 		}
 
 		err = wait_for_unbinds(eb, &unbound, pass++);
+		i915_acquire_ctx_init(&eb->acquire);
+		if (err == 0)
+			err = eb_reserve_mm(eb);
 		if (err)
 			return err;
 	} while (1);
@@ -1923,8 +1927,6 @@ static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
 	struct drm_i915_gem_object *obj = vma->obj;
 	int err;
 
-	i915_vma_lock(vma);
-
 	if (obj->cache_dirty & ~obj->cache_coherent)
 		i915_gem_clflush_object(obj, 0);
 	obj->write_domain = 0;
@@ -1933,8 +1935,6 @@ static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
 	if (err == 0)
 		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 
-	i915_vma_unlock(vma);
-
 	return err;
 }
 
@@ -2196,6 +2196,7 @@ static int eb_relocs_copy_user(struct i915_execbuffer *eb)
 	/* Drop everything before we copy_from_user */
 	list_for_each_entry(ev, &eb->bind_list, bind_link)
 		eb_unreserve_vma(ev);
+	i915_acquire_ctx_fini(&eb->acquire);
 
 	eb->reloc_cache.head.vma = NULL;
 	eb->reloc_cache.pos = N_RELOC;
@@ -2207,6 +2208,7 @@ static int eb_relocs_copy_user(struct i915_execbuffer *eb)
 	}
 
 	/* Now reacquire everything, including the extra reloc bo */
+	i915_acquire_ctx_init(&eb->acquire);
 	return eb_reserve_vm(eb);
 }
 
@@ -2248,11 +2250,9 @@ get_gpu_relocs(struct i915_execbuffer *eb,
 		struct i915_vma *vma = c->head.vma;
 		int err;
 
-		i915_vma_lock(vma);
 		err = i915_request_await_object(rq, vma->obj, false);
 		if (err == 0)
 			err = i915_vma_move_to_active(vma, rq, 0);
-		i915_vma_unlock(vma);
 		if (err)
 			return ERR_PTR(err);
 
@@ -2403,17 +2403,8 @@ static int eb_relocate(struct i915_execbuffer *eb)
 
 static int eb_move_to_gpu(struct i915_execbuffer *eb)
 {
-	struct ww_acquire_ctx acquire;
 	struct eb_vma *ev;
-	int err = 0;
-
-	ww_acquire_init(&acquire, &reservation_ww_class);
-
-	err = eb_lock_vma(eb, &acquire);
-	if (err)
-		goto err_fini;
-
-	ww_acquire_done(&acquire);
+	int err;
 
 	list_for_each_entry(ev, &eb->submit_list, submit_link) {
 		struct i915_vma *vma = ev->vma;
@@ -2450,27 +2441,22 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
 				flags &= ~EXEC_OBJECT_ASYNC;
 		}
 
-		if (err == 0 && !(flags & EXEC_OBJECT_ASYNC)) {
+		if (!(flags & EXEC_OBJECT_ASYNC)) {
 			err = i915_request_await_object
 				(eb->request, obj, flags & EXEC_OBJECT_WRITE);
+			if (unlikely(err))
+				goto err_skip;
 		}
 
-		if (err == 0)
-			err = i915_vma_move_to_active(vma, eb->request, flags);
-
-		i915_vma_unlock(vma);
+		err = i915_vma_move_to_active(vma, eb->request, flags);
+		if (unlikely(err))
+			goto err_skip;
 	}
-	ww_acquire_fini(&acquire);
-
-	if (unlikely(err))
-		goto err_skip;
 
 	/* Unconditionally flush any chipset caches (for streaming writes). */
 	intel_gt_chipset_flush(eb->engine->gt);
 	return 0;
 
-err_fini:
-	ww_acquire_fini(&acquire);
 err_skip:
 	i915_request_set_error_once(eb->request, err);
 	return err;
@@ -2622,39 +2608,27 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
 	/* Mark active refs early for this worker, in case we get interrupted */
 	err = parser_mark_active(pw, eb->context->timeline);
 	if (err)
-		goto err_commit;
-
-	err = dma_resv_lock_interruptible(pw->batch->resv, NULL);
-	if (err)
-		goto err_commit;
+		goto out;
 
 	err = dma_resv_reserve_shared(pw->batch->resv, 1);
 	if (err)
-		goto err_commit_unlock;
+		goto out;
 
 	/* Wait for all writes (and relocs) into the batch to complete */
 	err = i915_sw_fence_await_reservation(&pw->base.chain,
 					      pw->batch->resv, NULL, false,
 					      0, I915_FENCE_GFP);
 	if (err < 0)
-		goto err_commit_unlock;
+		goto out;
 
 	/* Keep the batch alive and unwritten as we parse */
 	dma_resv_add_shared_fence(pw->batch->resv, &pw->base.dma);
 
-	dma_resv_unlock(pw->batch->resv);
-
 	/* Force execution to wait for completion of the parser */
-	dma_resv_lock(shadow->resv, NULL);
 	dma_resv_add_excl_fence(shadow->resv, &pw->base.dma);
-	dma_resv_unlock(shadow->resv);
-
-	dma_fence_work_commit_imm(&pw->base);
-	return 0;
 
-err_commit_unlock:
-	dma_resv_unlock(pw->batch->resv);
-err_commit:
+	err = 0;
+out:
 	i915_sw_fence_set_error_once(&pw->base.chain, err);
 	dma_fence_work_commit_imm(&pw->base);
 	return err;
@@ -3278,6 +3252,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 		goto err_context;
 	lockdep_assert_held(&eb.context->timeline->mutex);
 
+	i915_acquire_ctx_init(&eb.acquire);
+
 	err = eb_relocate(&eb);
 	if (err) {
 		/*
@@ -3291,6 +3267,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 		goto err_vma;
 	}
 
+	i915_acquire_ctx_done(&eb.acquire);
+
 	err = eb_parse(&eb);
 	if (err)
 		goto err_vma;
@@ -3366,6 +3344,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 err_vma:
 	if (eb.parser.shadow)
 		intel_gt_buffer_pool_put(eb.parser.shadow->vma->private);
+	i915_acquire_ctx_fini(&eb.acquire);
 	eb_relocs_update_user(&eb);
 	eb_unpin_engine(&eb);
 err_context:
-- 
2.20.1


