[Intel-gfx] [PATCH] drm/i915: Serialize against vma moves

Chris Wilson <chris at chris-wilson.co.uk>
Mon Aug 19 11:20:33 UTC 2019


Make sure that when submitting requests, we always serialize against
potential vma moves and clflushes.

Time for an i915_request_await_vma() interface!

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld at intel.com>
---
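[Not part of this patch, just a sketch for discussion: the helper hinted at
above, i915_request_await_vma(), remains hypothetical, but it could wrap the
pattern that each hunk below now open-codes, using only the calls already
present in this series:]

	/*
	 * Hypothetical helper (sketch only): wait on the object's
	 * reservation, serialising against pending moves and clflushes,
	 * before marking the vma as active on the request.
	 */
	static int i915_request_await_vma(struct i915_request *rq,
					  struct i915_vma *vma,
					  unsigned int flags)
	{
		const bool write = flags & EXEC_OBJECT_WRITE;
		int err;

		i915_vma_lock(vma);
		err = i915_request_await_object(rq, vma->obj, write);
		if (err == 0)
			err = i915_vma_move_to_active(vma, rq, flags);
		i915_vma_unlock(vma);

		return err;
	}

[The move_to_active() helper touched in selftest_hangcheck.c below already
has exactly this shape, so the callers here would be trivial conversions.]
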
 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c   |  5 +++--
 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c   |  8 ++++++--
 .../drm/i915/gem/selftests/i915_gem_coherency.c  |  4 +++-
 .../drm/i915/gem/selftests/i915_gem_context.c    | 16 ++++++++++++----
 .../gpu/drm/i915/gem/selftests/i915_gem_mman.c   |  5 ++++-
 .../gpu/drm/i915/gem/selftests/igt_gem_utils.c   |  8 ++++++--
 drivers/gpu/drm/i915/gt/intel_renderstate.c      |  4 +++-
 drivers/gpu/drm/i915/gt/selftest_hangcheck.c     | 10 ++++++++--
 drivers/gpu/drm/i915/gt/selftest_lrc.c           |  9 ++++++---
 drivers/gpu/drm/i915/gt/selftest_workarounds.c   |  4 +++-
 drivers/gpu/drm/i915/selftests/i915_request.c    |  4 +++-
 11 files changed, 57 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 1263b02011f4..53752006d307 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1192,8 +1192,9 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 		goto skip_request;
 
 	i915_vma_lock(batch);
-	GEM_BUG_ON(!dma_resv_test_signaled_rcu(batch->resv, true));
-	err = i915_vma_move_to_active(batch, rq, 0);
+	err = i915_request_await_object(rq, batch->obj, false);
+	if (err == 0)
+		err = i915_vma_move_to_active(batch, rq, 0);
 	i915_vma_unlock(batch);
 	if (err)
 		goto skip_request;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
index 784585afac25..6415f9a17e2d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
@@ -106,7 +106,9 @@ int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq)
 	int err;
 
 	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, 0);
+	err = i915_request_await_object(rq, vma->obj, false);
+	if (err == 0)
+		err = i915_vma_move_to_active(vma, rq, 0);
 	i915_vma_unlock(vma);
 	if (unlikely(err))
 		return err;
@@ -171,7 +173,9 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
 	}
 
 	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	err = i915_request_await_object(rq, vma->obj, true);
+	if (err == 0)
+		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 	i915_vma_unlock(vma);
 	if (unlikely(err))
 		goto out_request;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index a1a4b53cdc4a..0ff7a89aadca 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -228,7 +228,9 @@ static int gpu_set(struct drm_i915_gem_object *obj,
 	intel_ring_advance(rq, cs);
 
 	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	err = i915_request_await_object(rq, vma->obj, true);
+	if (err == 0)
+		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 	i915_vma_unlock(vma);
 	i915_vma_unpin(vma);
 
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index dd87e6cd612e..3e6f4a65d356 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -666,13 +666,17 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
 		goto err_request;
 
 	i915_vma_lock(batch);
-	err = i915_vma_move_to_active(batch, rq, 0);
+	err = i915_request_await_object(rq, batch->obj, false);
+	if (err == 0)
+		err = i915_vma_move_to_active(batch, rq, 0);
 	i915_vma_unlock(batch);
 	if (err)
 		goto skip_request;
 
 	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	err = i915_request_await_object(rq, vma->obj, true);
+	if (err == 0)
+		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 	i915_vma_unlock(vma);
 	if (err)
 		goto skip_request;
@@ -1218,7 +1222,9 @@ static int write_to_scratch(struct i915_gem_context *ctx,
 		goto err_request;
 
 	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, 0);
+	err = i915_request_await_object(rq, vma->obj, false);
+	if (err == 0)
+		err = i915_vma_move_to_active(vma, rq, 0);
 	i915_vma_unlock(vma);
 	if (err)
 		goto skip_request;
@@ -1315,7 +1321,9 @@ static int read_from_scratch(struct i915_gem_context *ctx,
 		goto err_request;
 
 	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	err = i915_request_await_object(rq, vma->obj, true);
+	if (err == 0)
+		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 	i915_vma_unlock(vma);
 	if (err)
 		goto skip_request;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 50aa7e95124d..1d27babff0ce 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -351,7 +351,10 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
 		}
 
 		i915_vma_lock(vma);
-		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+		err = i915_request_await_object(rq, vma->obj, true);
+		if (err == 0)
+			err = i915_vma_move_to_active(vma, rq,
+						      EXEC_OBJECT_WRITE);
 		i915_vma_unlock(vma);
 
 		i915_request_add(rq);
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
index 42e1e9c58f63..57ece53c1075 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
@@ -139,13 +139,17 @@ int igt_gpu_fill_dw(struct i915_vma *vma,
 		goto err_request;
 
 	i915_vma_lock(batch);
-	err = i915_vma_move_to_active(batch, rq, 0);
+	err = i915_request_await_object(rq, batch->obj, false);
+	if (err == 0)
+		err = i915_vma_move_to_active(batch, rq, 0);
 	i915_vma_unlock(batch);
 	if (err)
 		goto skip_request;
 
 	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	err = i915_request_await_object(rq, vma->obj, true);
+	if (err == 0)
+		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 	i915_vma_unlock(vma);
 	if (err)
 		goto skip_request;
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index be37d4501c67..6d05f9c64178 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -222,7 +222,9 @@ int intel_renderstate_emit(struct i915_request *rq)
 	}
 
 	i915_vma_lock(so.vma);
-	err = i915_vma_move_to_active(so.vma, rq, 0);
+	err = i915_request_await_object(rq, so.vma->obj, false);
+	if (err == 0)
+		err = i915_vma_move_to_active(so.vma, rq, 0);
 	i915_vma_unlock(so.vma);
 err_unpin:
 	i915_vma_unpin(so.vma);
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 4484b4447db1..298c4d191439 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -118,7 +118,10 @@ static int move_to_active(struct i915_vma *vma,
 	int err;
 
 	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, flags);
+	err = i915_request_await_object(rq, vma->obj,
+					flags & EXEC_OBJECT_WRITE);
+	if (err == 0)
+		err = i915_vma_move_to_active(vma, rq, flags);
 	i915_vma_unlock(vma);
 
 	return err;
@@ -1237,7 +1240,10 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
 	}
 
 	i915_vma_lock(arg.vma);
-	err = i915_vma_move_to_active(arg.vma, rq, flags);
+	err = i915_request_await_object(rq, arg.vma->obj,
+					flags & EXEC_OBJECT_WRITE);
+	if (err == 0)
+		err = i915_vma_move_to_active(arg.vma, rq, flags);
 	i915_vma_unlock(arg.vma);
 
 	if (flags & EXEC_OBJECT_NEEDS_FENCE)
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index b797be1627e9..d60b88efe68b 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -1459,12 +1459,15 @@ static int smoke_submit(struct preempt_smoke *smoke,
 
 	if (vma) {
 		i915_vma_lock(vma);
-		err = rq->engine->emit_bb_start(rq,
-						vma->node.start,
-						PAGE_SIZE, 0);
+		err = i915_request_await_object(rq, vma->obj, false);
 		if (!err)
 			err = i915_vma_move_to_active(vma, rq, 0);
+		if (!err)
+			err = rq->engine->emit_bb_start(rq,
+							vma->node.start,
+							PAGE_SIZE, 0);
 		i915_vma_unlock(vma);
+
 	}
 
 	i915_request_add(rq);
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index eb5da01d0a08..d06d68ac2a3b 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -113,7 +113,9 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
 	}
 
 	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	err = i915_request_await_object(rq, vma->obj, true);
+	if (err == 0)
+		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 	i915_vma_unlock(vma);
 	if (err)
 		goto err_req;
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 3937d43c2961..170e4afa9ccb 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -876,7 +876,9 @@ static int live_all_engines(void *arg)
 		request[id]->batch = batch;
 
 		i915_vma_lock(batch);
-		err = i915_vma_move_to_active(batch, request[id], 0);
+		err = i915_request_await_object(request[id], batch->obj, 0);
+		if (err == 0)
+			err = i915_vma_move_to_active(batch, request[id], 0);
 		i915_vma_unlock(batch);
 		GEM_BUG_ON(err);
 
-- 
2.23.0.rc1
