[PATCH 14/32] drm/i915/gem: Rearrange execbuffer vma lookup before the timeline lock

Chris Wilson <chris@chris-wilson.co.uk>
Thu Jul 9 17:59:15 UTC 2020
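
Lift the vma lookup out of eb_relocate() so that it is performed before we
take the timeline mutex, and split engine pinning from timeline locking:
__eb_pin_engine() now only checks for a wedged GPU or a banned context,
pins the context and records eb->engine/eb->context, while the local
wakeref, the timeline lock and request retirement move to the new
eb_lock_engine()/eb_unlock_engine() pair. add_to_client() is deferred
until after eb_request_add(), and installation of the output fence fd is
moved to the common exit path.

The resulting ordering in i915_gem_do_execbuffer() is roughly (a sketch of
the reordering only, not the full function):

	eb_pin_engine();     /* select the engine, pin the context */
	eb_lookup_vmas();    /* object lookup, outside the timeline lock */
	eb_lock_engine();    /* *** TIMELINE LOCK *** */
	eb_relocate();
	...
	eb_unlock_engine();  /* *** TIMELINE UNLOCK *** */
	eb_unpin_engine();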


---
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    | 102 ++++++++++--------
 1 file changed, 59 insertions(+), 43 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 5968d4969330..e21374038b1f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1575,10 +1575,6 @@ static int eb_relocate(struct i915_execbuffer *eb)
 {
 	int err;
 
-	err = eb_lookup_vmas(eb);
-	if (err)
-		return err;
-
 	err = eb_reserve_vm(eb);
 	if (err)
 		return err;
@@ -2141,32 +2137,13 @@ static void __eb_unpin_reloc_engine(struct i915_execbuffer *eb)
 	intel_context_put(ce);
 }
 
-static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
+static int eb_lock_engine(struct i915_execbuffer *eb)
 {
+	struct intel_context *ce = eb->context;
 	struct intel_timeline *tl;
 	struct i915_request *rq;
 	int err;
 
-	/*
-	 * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
-	 * EIO if the GPU is already wedged.
-	 */
-	err = intel_gt_terminally_wedged(ce->engine->gt);
-	if (err)
-		return err;
-
-	if (unlikely(intel_context_is_banned(ce)))
-		return -EIO;
-
-	/*
-	 * Pinning the contexts may generate requests in order to acquire
-	 * GGTT space, so do this first before we reserve a seqno for
-	 * ourselves.
-	 */
-	err = intel_context_pin(ce);
-	if (err)
-		return err;
-
 	/*
 	 * Take a local wakeref for preparing to dispatch the execbuf as
 	 * we expect to access the hardware fairly frequently in the
@@ -2213,9 +2190,6 @@ static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
 		retire_requests(tl);
 	}
 
-	eb->engine = ce->engine;
-	eb->context = ce;
-
 	err = __eb_pin_reloc_engine(eb);
 	if (err)
 		goto err_exit;
@@ -2230,7 +2204,7 @@ static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
 	return err;
 }
 
-static void eb_unpin_engine(struct i915_execbuffer *eb)
+static void eb_unlock_engine(struct i915_execbuffer *eb)
 {
 	struct intel_context *ce = eb->context;
 
@@ -2241,8 +2215,11 @@ static void eb_unpin_engine(struct i915_execbuffer *eb)
 
 	intel_context_exit(ce);
 	intel_context_timeline_unlock(ce->timeline);
+}
 
-	intel_context_unpin(ce);
+static void eb_unpin_engine(struct i915_execbuffer *eb)
+{
+	intel_context_unpin(eb->context);
 }
 
 static unsigned int
@@ -2289,6 +2266,35 @@ eb_select_legacy_ring(struct i915_execbuffer *eb,
 	return user_ring_map[user_ring_id];
 }
 
+static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
+{
+	int err;
+
+	/*
+	 * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
+	 * EIO if the GPU is already wedged.
+	 */
+	err = intel_gt_terminally_wedged(ce->engine->gt);
+	if (err)
+		return err;
+
+	if (unlikely(intel_context_is_banned(ce)))
+		return -EIO;
+
+	/*
+	 * Pinning the contexts may generate requests in order to acquire
+	 * GGTT space, so do this first before we reserve a seqno for
+	 * ourselves.
+	 */
+	err = intel_context_pin(ce);
+	if (err)
+		return err;
+
+	eb->engine = ce->engine;
+	eb->context = ce;
+	return 0;
+}
+
 static int
 eb_pin_engine(struct i915_execbuffer *eb,
 	      struct drm_file *file,
@@ -2548,10 +2554,18 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	if (unlikely(err))
 		goto err_destroy;
 
-	/* *** TIMELINE LOCK *** */
 	err = eb_pin_engine(&eb, file, args);
 	if (unlikely(err))
 		goto err_context;
+
+	err = eb_lookup_vmas(&eb);
+	if (unlikely(err))
+		goto err_engine;
+
+	/* *** TIMELINE LOCK *** */
+	err = eb_lock_engine(&eb);
+	if (unlikely(err))
+		goto err_engine;
 	lockdep_assert_held(&eb.context->timeline->mutex);
 
 	err = eb_relocate(&eb);
@@ -2667,23 +2681,13 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	trace_i915_request_queue(eb.request, eb.batch_flags);
 	err = eb_submit(&eb, batch);
 err_request:
-	add_to_client(eb.request, file);
 	i915_request_get(eb.request);
 	eb_request_add(&eb);
 
 	if (fences)
 		signal_fence_array(&eb, fences);
 
-	if (out_fence) {
-		if (err == 0) {
-			fd_install(out_fence_fd, out_fence->file);
-			args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
-			args->rsvd2 |= (u64)out_fence_fd << 32;
-			out_fence_fd = -1;
-		} else {
-			fput(out_fence->file);
-		}
-	}
+	add_to_client(eb.request, file);
 	i915_request_put(eb.request);
 
 err_batch_unpin:
@@ -2695,13 +2699,25 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 err_vma:
 	if (eb.trampoline)
 		i915_vma_unpin(eb.trampoline);
-	eb_unpin_engine(&eb);
+	eb_unlock_engine(&eb);
 	/* *** TIMELINE UNLOCK *** */
+err_engine:
+	eb_unpin_engine(&eb);
 err_context:
 	i915_gem_context_put(eb.gem_context);
 err_destroy:
 	eb_destroy(&eb);
 err_out_fence:
+	if (out_fence) {
+		if (err == 0) {
+			fd_install(out_fence_fd, out_fence->file);
+			args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
+			args->rsvd2 |= (u64)out_fence_fd << 32;
+			out_fence_fd = -1;
+		} else {
+			fput(out_fence->file);
+		}
+	}
 	if (out_fence_fd != -1)
 		put_unused_fd(out_fence_fd);
 err_in_fence:
-- 
2.20.1


