Subject: [PATCH 31/64] drm/i915: Fix workarounds selftest, part 1
From: Maarten Lankhorst <maarten.lankhorst at linux.intel.com>
Date: Wed Feb 3 13:19:25 UTC 2021
i915_gem_object_pin_map() needs the ww lock held, so take the lock and
pin both the batch and scratch objects up front, before submission. Also
add an i915_gem_object_pin_map_unlocked() helper for callers that only
need a short-lived mapping and can take the lock themselves.
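For reference, this is roughly the ww/backoff pattern the selftest now
follows (a minimal sketch only; error unwinding is trimmed and "obj"
stands in for the batch/scratch objects handled below):

    struct i915_gem_ww_ctx ww;
    void *vaddr;
    int err;

    i915_gem_ww_ctx_init(&ww, false);
retry:
    err = i915_gem_object_lock(obj, &ww);
    if (!err) {
        /* pin_map requires the object lock to be held */
        vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
        if (IS_ERR(vaddr))
            err = PTR_ERR(vaddr);
    }
    if (err == -EDEADLK) {
        /* drop all held ww locks and start over */
        err = i915_gem_ww_ctx_backoff(&ww);
        if (!err)
            goto retry;
    }
    i915_gem_ww_ctx_fini(&ww);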
Signed-off-by: Maarten Lankhorst <maarten.lankhorst at linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
---
drivers/gpu/drm/i915/gem/i915_gem_object.h | 3 +
drivers/gpu/drm/i915/gem/i915_gem_pages.c | 12 +++
.../gpu/drm/i915/gt/selftest_workarounds.c | 75 ++++++++++++-------
3 files changed, 63 insertions(+), 27 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 56165db586d6..6ce949e92444 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -437,6 +437,9 @@ void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
enum i915_map_type type);
+void *__must_check i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
+ enum i915_map_type type);
+
void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
unsigned long offset,
unsigned long size);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index e947d4c0da1f..a24617af3c93 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -400,6 +400,18 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
goto out_unlock;
}
+void *i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
+ enum i915_map_type type)
+{
+ void *ret;
+
+ i915_gem_object_lock(obj, NULL);
+ ret = i915_gem_object_pin_map(obj, type);
+ i915_gem_object_unlock(obj);
+
+ return ret;
+}
+
void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
unsigned long offset,
unsigned long size)
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index de6136bd10ac..85f79399f72e 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -107,11 +107,9 @@ read_nonprivs(struct intel_context *ce, struct i915_vma *result)
if (IS_ERR(rq))
return rq;
- i915_vma_lock(result);
err = i915_request_await_object(rq, result->obj, true);
if (err == 0)
err = i915_vma_move_to_active(result, rq, EXEC_OBJECT_WRITE);
- i915_vma_unlock(result);
if (err)
goto err_rq;
@@ -180,6 +178,7 @@ static int check_whitelist(struct intel_context *ce)
if (IS_ERR(result))
return PTR_ERR(result);
+ i915_gem_object_lock(result->obj, NULL);
vaddr = i915_gem_object_pin_map(result->obj, I915_MAP_WB);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
@@ -219,6 +218,7 @@ static int check_whitelist(struct intel_context *ce)
out_map:
i915_gem_object_unpin_map(result->obj);
out_put:
+ i915_gem_object_unlock(result->obj);
i915_vma_put(result);
return err;
}
@@ -475,6 +475,7 @@ static int check_dirty_whitelist(struct intel_context *ce)
for (i = 0; i < engine->whitelist.count; i++) {
u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
+ struct i915_gem_ww_ctx ww;
u64 addr = scratch->node.start;
struct i915_request *rq;
u32 srm, lrm, rsvd;
@@ -490,6 +491,29 @@ static int check_dirty_whitelist(struct intel_context *ce)
ro_reg = ro_register(reg);
+ i915_gem_ww_ctx_init(&ww, false);
+retry:
+ cs = NULL;
+ err = i915_gem_object_lock(scratch->obj, &ww);
+ if (!err)
+ err = i915_gem_object_lock(batch->obj, &ww);
+ if (!err)
+ err = intel_context_pin_ww(ce, &ww);
+ if (err)
+ goto out;
+
+ cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto out_ctx;
+ }
+
+ results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+ if (IS_ERR(results)) {
+ err = PTR_ERR(results);
+ goto out_unmap_batch;
+ }
+
/* Clear non priv flags */
reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;
@@ -501,12 +525,6 @@ static int check_dirty_whitelist(struct intel_context *ce)
pr_debug("%s: Writing garbage to %x\n",
engine->name, reg);
- cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
- if (IS_ERR(cs)) {
- err = PTR_ERR(cs);
- goto out_batch;
- }
-
/* SRM original */
*cs++ = srm;
*cs++ = reg;
@@ -553,11 +571,12 @@ static int check_dirty_whitelist(struct intel_context *ce)
i915_gem_object_flush_map(batch->obj);
i915_gem_object_unpin_map(batch->obj);
intel_gt_chipset_flush(engine->gt);
+ cs = NULL;
- rq = intel_context_create_request(ce);
+ rq = i915_request_create(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto out_batch;
+ goto out_unmap_scratch;
}
if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
@@ -566,20 +585,16 @@ static int check_dirty_whitelist(struct intel_context *ce)
goto err_request;
}
- i915_vma_lock(batch);
err = i915_request_await_object(rq, batch->obj, false);
if (err == 0)
err = i915_vma_move_to_active(batch, rq, 0);
- i915_vma_unlock(batch);
if (err)
goto err_request;
- i915_vma_lock(scratch);
err = i915_request_await_object(rq, scratch->obj, true);
if (err == 0)
err = i915_vma_move_to_active(scratch, rq,
EXEC_OBJECT_WRITE);
- i915_vma_unlock(scratch);
if (err)
goto err_request;
@@ -595,13 +610,7 @@ static int check_dirty_whitelist(struct intel_context *ce)
pr_err("%s: Futzing %x timedout; cancelling test\n",
engine->name, reg);
intel_gt_set_wedged(engine->gt);
- goto out_batch;
- }
-
- results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
- if (IS_ERR(results)) {
- err = PTR_ERR(results);
- goto out_batch;
+ goto out_unmap_scratch;
}
GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
@@ -612,7 +621,7 @@ static int check_dirty_whitelist(struct intel_context *ce)
pr_err("%s: Unable to write to whitelisted register %x\n",
engine->name, reg);
err = -EINVAL;
- goto out_unpin;
+ goto out_unmap_scratch;
}
} else {
rsvd = 0;
@@ -678,15 +687,27 @@ static int check_dirty_whitelist(struct intel_context *ce)
err = -EINVAL;
}
-out_unpin:
+out_unmap_scratch:
i915_gem_object_unpin_map(scratch->obj);
+out_unmap_batch:
+ if (cs)
+ i915_gem_object_unpin_map(batch->obj);
+out_ctx:
+ intel_context_unpin(ce);
+out:
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
if (err)
break;
}
if (igt_flush_test(engine->i915))
err = -EIO;
-out_batch:
+
i915_vma_unpin_and_release(&batch, 0);
out_scratch:
i915_vma_unpin_and_release(&scratch, 0);
@@ -820,7 +841,7 @@ static int scrub_whitelisted_registers(struct intel_context *ce)
if (IS_ERR(batch))
return PTR_ERR(batch);
- cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
goto err_batch;
@@ -955,11 +976,11 @@ check_whitelisted_registers(struct intel_engine_cs *engine,
u32 *a, *b;
int i, err;
- a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
+ a = i915_gem_object_pin_map_unlocked(A->obj, I915_MAP_WB);
if (IS_ERR(a))
return PTR_ERR(a);
- b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
+ b = i915_gem_object_pin_map_unlocked(B->obj, I915_MAP_WB);
if (IS_ERR(b)) {
err = PTR_ERR(b);
goto err_a;
--
2.30.0