[Intel-gfx] [PATCH v8 53/69] drm/i915/selftests: Prepare execlists and lrc selftests for obj->mm.lock removal

Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Thu Mar 11 13:42:33 UTC 2021


Convert the normal i915_gem_object_pin_map() calls to their unlocked
counterparts where needed, since these selftests do not take the object
lock themselves.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
---
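For reference, a minimal sketch of what the unlocked wrapper is expected
to do, assuming the dma-resv based i915_gem_object_lock()/unlock()
helpers from gem/i915_gem_object.h; the helper name and the NULL ww
context below are illustrative only and not taken from this patch:

    #include "gem/i915_gem_object.h"

    /* Sketch: map the object while transiently holding the object lock. */
    static void *pin_map_taking_lock(struct drm_i915_gem_object *obj,
                                     enum i915_map_type type)
    {
            void *ptr;

            i915_gem_object_lock(obj, NULL);  /* dma-resv object lock */
            ptr = i915_gem_object_pin_map(obj, type);
            i915_gem_object_unlock(obj);

            return ptr;  /* ERR_PTR() on failure, as with pin_map() */
    }

With such a wrapper the selftests keep a single call per mapping instead
of open-coding lock/pin_map/unlock at every call site, which is why the
conversion below is a mechanical one-line change per caller.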
 drivers/gpu/drm/i915/gt/selftest_execlists.c | 18 +++++++++---------
 drivers/gpu/drm/i915/gt/selftest_lrc.c       | 16 ++++++++--------
 2 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index a6e77a161b70..e97825447dca 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -982,7 +982,7 @@ static int live_timeslice_preempt(void *arg)
 		goto err_obj;
 	}
 
-	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
 	if (IS_ERR(vaddr)) {
 		err = PTR_ERR(vaddr);
 		goto err_obj;
@@ -1289,7 +1289,7 @@ static int live_timeslice_queue(void *arg)
 		goto err_obj;
 	}
 
-	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
 	if (IS_ERR(vaddr)) {
 		err = PTR_ERR(vaddr);
 		goto err_obj;
@@ -1531,7 +1531,7 @@ static int live_busywait_preempt(void *arg)
 		goto err_ctx_lo;
 	}
 
-	map = i915_gem_object_pin_map(obj, I915_MAP_WC);
+	map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
 	if (IS_ERR(map)) {
 		err = PTR_ERR(map);
 		goto err_obj;
@@ -2691,7 +2691,7 @@ static int create_gang(struct intel_engine_cs *engine,
 	if (err)
 		goto err_obj;
 
-	cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+	cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
 	if (IS_ERR(cs)) {
 		err = PTR_ERR(cs);
 		goto err_obj;
@@ -2970,7 +2970,7 @@ static int live_preempt_gang(void *arg)
 		 * it will terminate the next lowest spinner until there
 		 * are no more spinners and the gang is complete.
 		 */
-		cs = i915_gem_object_pin_map(rq->batch->obj, I915_MAP_WC);
+		cs = i915_gem_object_pin_map_unlocked(rq->batch->obj, I915_MAP_WC);
 		if (!IS_ERR(cs)) {
 			*cs = 0;
 			i915_gem_object_unpin_map(rq->batch->obj);
@@ -3035,7 +3035,7 @@ create_gpr_user(struct intel_engine_cs *engine,
 		return ERR_PTR(err);
 	}
 
-	cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+	cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
 	if (IS_ERR(cs)) {
 		i915_vma_put(vma);
 		return ERR_CAST(cs);
@@ -3239,7 +3239,7 @@ static int live_preempt_user(void *arg)
 	if (IS_ERR(global))
 		return PTR_ERR(global);
 
-	result = i915_gem_object_pin_map(global->obj, I915_MAP_WC);
+	result = i915_gem_object_pin_map_unlocked(global->obj, I915_MAP_WC);
 	if (IS_ERR(result)) {
 		i915_vma_unpin_and_release(&global, 0);
 		return PTR_ERR(result);
@@ -3626,7 +3626,7 @@ static int live_preempt_smoke(void *arg)
 		goto err_free;
 	}
 
-	cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
+	cs = i915_gem_object_pin_map_unlocked(smoke.batch, I915_MAP_WB);
 	if (IS_ERR(cs)) {
 		err = PTR_ERR(cs);
 		goto err_batch;
@@ -4231,7 +4231,7 @@ static int preserved_virtual_engine(struct intel_gt *gt,
 		goto out_end;
 	}
 
-	cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+	cs = i915_gem_object_pin_map_unlocked(scratch->obj, I915_MAP_WB);
 	if (IS_ERR(cs)) {
 		err = PTR_ERR(cs);
 		goto out_end;
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 1f7a120606e6..5726943d7ff0 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -627,7 +627,7 @@ static int __live_lrc_gpr(struct intel_engine_cs *engine,
 		goto err_rq;
 	}
 
-	cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+	cs = i915_gem_object_pin_map_unlocked(scratch->obj, I915_MAP_WB);
 	if (IS_ERR(cs)) {
 		err = PTR_ERR(cs);
 		goto err_rq;
@@ -923,7 +923,7 @@ store_context(struct intel_context *ce,
 	if (IS_ERR(batch))
 		return batch;
 
-	cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+	cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
 	if (IS_ERR(cs)) {
 		i915_vma_put(batch);
 		return ERR_CAST(cs);
@@ -1138,7 +1138,7 @@ load_context(struct intel_context *ce,
 	if (IS_ERR(batch))
 		return batch;
 
-	cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+	cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
 	if (IS_ERR(cs)) {
 		i915_vma_put(batch);
 		return ERR_CAST(cs);
@@ -1277,29 +1277,29 @@ static int compare_isolation(struct intel_engine_cs *engine,
 	u32 *defaults;
 	int err = 0;
 
-	A[0] = i915_gem_object_pin_map(ref[0]->obj, I915_MAP_WC);
+	A[0] = i915_gem_object_pin_map_unlocked(ref[0]->obj, I915_MAP_WC);
 	if (IS_ERR(A[0]))
 		return PTR_ERR(A[0]);
 
-	A[1] = i915_gem_object_pin_map(ref[1]->obj, I915_MAP_WC);
+	A[1] = i915_gem_object_pin_map_unlocked(ref[1]->obj, I915_MAP_WC);
 	if (IS_ERR(A[1])) {
 		err = PTR_ERR(A[1]);
 		goto err_A0;
 	}
 
-	B[0] = i915_gem_object_pin_map(result[0]->obj, I915_MAP_WC);
+	B[0] = i915_gem_object_pin_map_unlocked(result[0]->obj, I915_MAP_WC);
 	if (IS_ERR(B[0])) {
 		err = PTR_ERR(B[0]);
 		goto err_A1;
 	}
 
-	B[1] = i915_gem_object_pin_map(result[1]->obj, I915_MAP_WC);
+	B[1] = i915_gem_object_pin_map_unlocked(result[1]->obj, I915_MAP_WC);
 	if (IS_ERR(B[1])) {
 		err = PTR_ERR(B[1]);
 		goto err_B0;
 	}
 
-	lrc = i915_gem_object_pin_map(ce->state->obj,
+	lrc = i915_gem_object_pin_map_unlocked(ce->state->obj,
 				      i915_coherent_map_type(engine->i915));
 	if (IS_ERR(lrc)) {
 		err = PTR_ERR(lrc);
-- 
2.30.1


