[PATCH 53/63] drm/i915/selftests: Prepare execlists and lrc selftests for obj->mm.lock removal
Maarten Lankhorst
maarten.lankhorst at linux.intel.com
Mon Jan 25 10:40:33 UTC 2021
Convert the normal i915_gem_object_pin_map() calls to their unlocked versions where needed.
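For reference, the conversion assumes an _unlocked wrapper along these lines. This is only an illustrative sketch of the expected pattern, not the definitive implementation from the series; the object-lock calls shown are the existing i915_gem_object_lock()/i915_gem_object_unlock() API, and the types come from the usual i915 GEM headers:

    /*
     * Illustrative sketch only: the unlocked variant is assumed to take
     * the object (dma-resv/ww) lock itself around the locked pin_map,
     * so call sites that are not already inside a ww transaction need
     * no change beyond switching to the _unlocked name.
     */
    void *i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
                                           enum i915_map_type type)
    {
            void *ptr;

            i915_gem_object_lock(obj, NULL);
            ptr = i915_gem_object_pin_map(obj, type);
            i915_gem_object_unlock(obj);

            return ptr;
    }

With that assumption, the selftest call sites below stay a single call; only the mapping helper name changes, and the unpin side (i915_gem_object_unpin_map()) is unaffected.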
Signed-off-by: Maarten Lankhorst <maarten.lankhorst at linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
---
 drivers/gpu/drm/i915/gt/selftest_execlists.c | 18 +++++++++---------
 drivers/gpu/drm/i915/gt/selftest_lrc.c       | 16 ++++++++--------
 2 files changed, 17 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index e476da0053df..0f49f38e1187 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -988,7 +988,7 @@ static int live_timeslice_preempt(void *arg)
goto err_obj;
}
- vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto err_obj;
@@ -1295,7 +1295,7 @@ static int live_timeslice_queue(void *arg)
goto err_obj;
}
- vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto err_obj;
@@ -1538,7 +1538,7 @@ static int live_busywait_preempt(void *arg)
goto err_ctx_lo;
}
- map = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(map)) {
err = PTR_ERR(map);
goto err_obj;
@@ -2702,7 +2702,7 @@ static int create_gang(struct intel_engine_cs *engine,
if (err)
goto err_obj;
- cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
goto err_obj;
@@ -2983,7 +2983,7 @@ static int live_preempt_gang(void *arg)
* it will terminate the next lowest spinner until there
* are no more spinners and the gang is complete.
*/
- cs = i915_gem_object_pin_map(rq->batch->obj, I915_MAP_WC);
+ cs = i915_gem_object_pin_map_unlocked(rq->batch->obj, I915_MAP_WC);
if (!IS_ERR(cs)) {
*cs = 0;
i915_gem_object_unpin_map(rq->batch->obj);
@@ -3048,7 +3048,7 @@ create_gpr_user(struct intel_engine_cs *engine,
return ERR_PTR(err);
}
- cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(cs)) {
i915_vma_put(vma);
return ERR_CAST(cs);
@@ -3255,7 +3255,7 @@ static int live_preempt_user(void *arg)
if (IS_ERR(global))
return PTR_ERR(global);
- result = i915_gem_object_pin_map(global->obj, I915_MAP_WC);
+ result = i915_gem_object_pin_map_unlocked(global->obj, I915_MAP_WC);
if (IS_ERR(result)) {
i915_vma_unpin_and_release(&global, 0);
return PTR_ERR(result);
@@ -3642,7 +3642,7 @@ static int live_preempt_smoke(void *arg)
goto err_free;
}
- cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
+ cs = i915_gem_object_pin_map_unlocked(smoke.batch, I915_MAP_WB);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
goto err_batch;
@@ -4247,7 +4247,7 @@ static int preserved_virtual_engine(struct intel_gt *gt,
goto out_end;
}
- cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+ cs = i915_gem_object_pin_map_unlocked(scratch->obj, I915_MAP_WB);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
goto out_end;
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index f74addad98ac..ad90a52c4172 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -627,7 +627,7 @@ static int __live_lrc_gpr(struct intel_engine_cs *engine,
goto err_rq;
}
- cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+ cs = i915_gem_object_pin_map_unlocked(scratch->obj, I915_MAP_WB);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
goto err_rq;
@@ -920,7 +920,7 @@ store_context(struct intel_context *ce, struct i915_vma *scratch)
if (IS_ERR(batch))
return batch;
- cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
if (IS_ERR(cs)) {
i915_vma_put(batch);
return ERR_CAST(cs);
@@ -1084,7 +1084,7 @@ static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
if (IS_ERR(batch))
return batch;
- cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
if (IS_ERR(cs)) {
i915_vma_put(batch);
return ERR_CAST(cs);
@@ -1198,29 +1198,29 @@ static int compare_isolation(struct intel_engine_cs *engine,
u32 *defaults;
int err = 0;
- A[0] = i915_gem_object_pin_map(ref[0]->obj, I915_MAP_WC);
+ A[0] = i915_gem_object_pin_map_unlocked(ref[0]->obj, I915_MAP_WC);
if (IS_ERR(A[0]))
return PTR_ERR(A[0]);
- A[1] = i915_gem_object_pin_map(ref[1]->obj, I915_MAP_WC);
+ A[1] = i915_gem_object_pin_map_unlocked(ref[1]->obj, I915_MAP_WC);
if (IS_ERR(A[1])) {
err = PTR_ERR(A[1]);
goto err_A0;
}
- B[0] = i915_gem_object_pin_map(result[0]->obj, I915_MAP_WC);
+ B[0] = i915_gem_object_pin_map_unlocked(result[0]->obj, I915_MAP_WC);
if (IS_ERR(B[0])) {
err = PTR_ERR(B[0]);
goto err_A1;
}
- B[1] = i915_gem_object_pin_map(result[1]->obj, I915_MAP_WC);
+ B[1] = i915_gem_object_pin_map_unlocked(result[1]->obj, I915_MAP_WC);
if (IS_ERR(B[1])) {
err = PTR_ERR(B[1]);
goto err_B0;
}
- lrc = i915_gem_object_pin_map(ce->state->obj,
+ lrc = i915_gem_object_pin_map_unlocked(ce->state->obj,
i915_coherent_map_type(engine->i915));
if (IS_ERR(lrc)) {
err = PTR_ERR(lrc);
--
2.30.0