[PATCH 2/2] drm/i915: Store whitelist registers in the default context image

Chris Wilson <chris@chris-wilson.co.uk>
Thu Apr 12 12:05:14 UTC 2018


As the RING_NONPRIV (whitelist) registers are stored inside the context,
we need only emit them once when setting up the default context image
that is then copied into every user context.
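
In the default context image this amounts to a single LRI packet,
sketched here after whitelist_emit() below (see the patch for the
exact emission):

	*cs++ = MI_LOAD_REGISTER_IMM(wa->count);
	for (i = 0; i < wa->count; i++) {
		*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
		*cs++ = i915_mmio_reg_offset(wa->reg[i]);
	}
	*cs++ = MI_NOOP;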

To validate the expectation that the NONPRIV registers are stored
inside the context image, and so are preserved across resets, we
include a selftest.
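
The selftest samples each RING_FORCE_TO_NONPRIV slot from within the
context using SRM, roughly as below (a sketch of the readback loop in
read_nonprivs()), and compares the results against the expected
whitelist before and after both an engine and a full device reset:

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		*cs++ = MI_STORE_REGISTER_MEM + (INTEL_GEN(i915) >= 8);
		*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
		*cs++ = lower_32_bits(addr + sizeof(u32) * i);
		*cs++ = upper_32_bits(addr + sizeof(u32) * i);
	}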

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Oscar Mateo <oscar.mateo@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c           |  14 +-
 drivers/gpu/drm/i915/i915_drv.h               |   1 -
 drivers/gpu/drm/i915/intel_lrc.c              |   8 -
 drivers/gpu/drm/i915/intel_ringbuffer.c       |   4 -
 drivers/gpu/drm/i915/intel_workarounds.c      | 238 ++++++++----------
 drivers/gpu/drm/i915/intel_workarounds.h      |   2 -
 .../drm/i915/selftests/i915_live_selftests.h  |   1 +
 .../drm/i915/selftests/intel_workarounds.c    | 247 ++++++++++++++++++
 8 files changed, 349 insertions(+), 166 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/selftests/intel_workarounds.c

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 2e6652a9bb9e..e0274f41bc76 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -3304,24 +3304,13 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
 
 static int i915_wa_registers(struct seq_file *m, void *unused)
 {
-	int i;
-	int ret;
-	struct intel_engine_cs *engine;
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
 	struct i915_workarounds *workarounds = &dev_priv->workarounds;
-	enum intel_engine_id id;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
+	int i;
 
 	intel_runtime_pm_get(dev_priv);
 
 	seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
-	for_each_engine(engine, dev_priv, id)
-		seq_printf(m, "HW whitelist count for %s: %d\n",
-			   engine->name, workarounds->hw_whitelist_count[id]);
 	for (i = 0; i < workarounds->count; ++i) {
 		i915_reg_t addr;
 		u32 mask, value, read;
@@ -3337,7 +3326,6 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
 	}
 
 	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 649c0f2f3bae..15e1260be58e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1297,7 +1297,6 @@ struct i915_wa_reg {
 struct i915_workarounds {
 	struct i915_wa_reg reg[I915_MAX_WA_REGS];
 	u32 count;
-	u32 hw_whitelist_count[I915_NUM_ENGINES];
 };
 
 struct i915_virtual_gpu {
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index c7c85134a84a..e8e9b730aad6 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1744,10 +1744,6 @@ static int gen8_init_render_ring(struct intel_engine_cs *engine)
 	if (ret)
 		return ret;
 
-	ret = intel_whitelist_workarounds_apply(engine);
-	if (ret)
-		return ret;
-
 	/* We need to disable the AsyncFlip performance optimisations in order
 	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
 	 * programmed to '1' on all products.
@@ -1769,10 +1765,6 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
 	if (ret)
 		return ret;
 
-	ret = intel_whitelist_workarounds_apply(engine);
-	if (ret)
-		return ret;
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 757bb0990c07..d2ac4435c199 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -618,10 +618,6 @@ static int init_render_ring(struct intel_engine_cs *engine)
 	if (ret)
 		return ret;
 
-	ret = intel_whitelist_workarounds_apply(engine);
-	if (ret)
-		return ret;
-
 	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
 	if (IS_GEN(dev_priv, 4, 6))
 		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 16285b9ea5a3..b7d30c2107aa 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -475,205 +475,163 @@ int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv)
 	return 0;
 }
 
-static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
-				 i915_reg_t reg)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-	struct i915_workarounds *wa = &dev_priv->workarounds;
-	const unsigned int index = wa->hw_whitelist_count[engine->id];
+struct whitelist {
+	i915_reg_t reg[RING_MAX_NONPRIV_SLOTS];
+	unsigned int count;
+};
 
-	if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
-		return -EINVAL;
+static void whitelist_reg(struct whitelist *wa, i915_reg_t reg)
+{
+	const unsigned int index = wa->count++;
 
-	I915_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
-		   i915_mmio_reg_offset(reg));
-	wa->hw_whitelist_count[engine->id]++;
+	if (GEM_WARN_ON(index >= ARRAY_SIZE(wa->reg)))
+		return;
 
-	return 0;
+	wa->reg[index] = reg;
 }
 
-static int bdw_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static void bdw_whitelist_apply(struct whitelist *wa)
 {
-	return 0;
 }
 
-static int chv_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static void chv_whitelist_apply(struct whitelist *wa)
 {
-	return 0;
 }
 
-static int gen9_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static void gen9_whitelist_apply(struct whitelist *wa)
 {
-	int ret;
-
 	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
-	ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
-	if (ret)
-		return ret;
+	whitelist_reg(wa, GEN9_CTX_PREEMPT_REG);
 
 	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
-	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
-	if (ret)
-		return ret;
+	whitelist_reg(wa, GEN8_CS_CHICKEN1);
 
 	/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
-	ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
-	if (ret)
-		return ret;
-
-	return 0;
+	whitelist_reg(wa, GEN8_HDC_CHICKEN1);
 }
 
-static int skl_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static void skl_whitelist_apply(struct whitelist *wa)
 {
-	int ret;
-
-	ret = gen9_whitelist_workarounds_apply(engine);
-	if (ret)
-		return ret;
+	gen9_whitelist_apply(wa);
 
 	/* WaDisableLSQCROPERFforOCL:skl */
-	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
-	if (ret)
-		return ret;
-
-	return 0;
+	whitelist_reg(wa, GEN8_L3SQCREG4);
 }
 
-static int bxt_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static void bxt_whitelist_apply(struct whitelist *wa)
 {
-	int ret;
-
-	ret = gen9_whitelist_workarounds_apply(engine);
-	if (ret)
-		return ret;
-
-	return 0;
+	gen9_whitelist_apply(wa);
 }
 
-static int kbl_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static void kbl_whitelist_apply(struct whitelist *wa)
 {
-	int ret;
-
-	ret = gen9_whitelist_workarounds_apply(engine);
-	if (ret)
-		return ret;
+	gen9_whitelist_apply(wa);
 
 	/* WaDisableLSQCROPERFforOCL:kbl */
-	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
-	if (ret)
-		return ret;
-
-	return 0;
+	whitelist_reg(wa, GEN8_L3SQCREG4);
 }
 
-static int glk_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static void glk_whitelist_apply(struct whitelist *wa)
 {
-	int ret;
-
-	ret = gen9_whitelist_workarounds_apply(engine);
-	if (ret)
-		return ret;
+	gen9_whitelist_apply(wa);
 
 	/* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
-	ret = wa_ring_whitelist_reg(engine, GEN9_SLICE_COMMON_ECO_CHICKEN1);
-	if (ret)
-		return ret;
-
-	return 0;
+	whitelist_reg(wa, GEN9_SLICE_COMMON_ECO_CHICKEN1);
 }
 
-static int cfl_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static void cfl_whitelist_apply(struct whitelist *wa)
 {
-	int ret;
-
-	ret = gen9_whitelist_workarounds_apply(engine);
-	if (ret)
-		return ret;
-
-	return 0;
+	gen9_whitelist_apply(wa);
 }
 
-static int cnl_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static void cnl_whitelist_apply(struct whitelist *wa)
 {
-	int ret;
-
 	/* WaEnablePreemptionGranularityControlByUMD:cnl */
-	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
-	if (ret)
-		return ret;
-
-	return 0;
+	whitelist_reg(wa, GEN8_CS_CHICKEN1);
 }
 
-int intel_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static int whitelist_emit(struct i915_request *rq, const struct whitelist *wa)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
-	int err = 0;
+	const u32 base = rq->engine->mmio_base;
+	unsigned int i;
+	u32 *cs;
 
-	WARN_ON(engine->id != RCS);
+	if (!wa->count)
+		return 0;
 
-	dev_priv->workarounds.hw_whitelist_count[engine->id] = 0;
+	cs = intel_ring_begin(rq, 2 * wa->count + 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	if (INTEL_GEN(dev_priv) < 8)
-		err = 0;
-	else if (IS_BROADWELL(dev_priv))
-		err = bdw_whitelist_workarounds_apply(engine);
-	else if (IS_CHERRYVIEW(dev_priv))
-		err = chv_whitelist_workarounds_apply(engine);
-	else if (IS_SKYLAKE(dev_priv))
-		err = skl_whitelist_workarounds_apply(engine);
-	else if (IS_BROXTON(dev_priv))
-		err = bxt_whitelist_workarounds_apply(engine);
-	else if (IS_KABYLAKE(dev_priv))
-		err = kbl_whitelist_workarounds_apply(engine);
-	else if (IS_GEMINILAKE(dev_priv))
-		err = glk_whitelist_workarounds_apply(engine);
-	else if (IS_COFFEELAKE(dev_priv))
-		err = cfl_whitelist_workarounds_apply(engine);
-	else if (IS_CANNONLAKE(dev_priv))
-		err = cnl_whitelist_workarounds_apply(engine);
-	else
-		MISSING_CASE(INTEL_GEN(dev_priv));
-	if (err)
-		return err;
+	*cs++ = MI_LOAD_REGISTER_IMM(wa->count);
+	for (i = 0; i < wa->count; i++) {
+		*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
+		*cs++ = i915_mmio_reg_offset(wa->reg[i]);
+	}
+	*cs++ = MI_NOOP;
+
+	intel_ring_advance(rq, cs);
 
-	DRM_DEBUG_DRIVER("%s: Number of whitelist w/a: %d\n", engine->name,
-			 dev_priv->workarounds.hw_whitelist_count[engine->id]);
 	return 0;
 }
 
+static struct whitelist *whitelist_apply(struct intel_engine_cs *engine,
+					 struct whitelist *wa)
+{
+	struct drm_i915_private *i915 = engine->i915;
+
+	GEM_BUG_ON(engine->id != RCS);
+
+	wa->count = 0;
+
+	if (INTEL_GEN(i915) < 8)
+		;
+	else if (IS_BROADWELL(i915))
+		bdw_whitelist_apply(wa);
+	else if (IS_CHERRYVIEW(i915))
+		chv_whitelist_apply(wa);
+	else if (IS_SKYLAKE(i915))
+		skl_whitelist_apply(wa);
+	else if (IS_BROXTON(i915))
+		bxt_whitelist_apply(wa);
+	else if (IS_KABYLAKE(i915))
+		kbl_whitelist_apply(wa);
+	else if (IS_GEMINILAKE(i915))
+		glk_whitelist_apply(wa);
+	else if (IS_COFFEELAKE(i915))
+		cfl_whitelist_apply(wa);
+	else if (IS_CANNONLAKE(i915))
+		cnl_whitelist_apply(wa);
+	else
+		MISSING_CASE(INTEL_GEN(i915));
+
+	return wa;
+}
+
 int intel_ctx_workarounds_emit(struct i915_request *rq)
 {
 	struct i915_workarounds *w = &rq->i915->workarounds;
-	u32 *cs;
-	int ret, i;
+	struct whitelist wa;
 
-	if (w->count == 0)
-		return 0;
+	if (w->count) {
+		u32 *cs;
+		int i;
 
-	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
-	if (ret)
-		return ret;
+		cs = intel_ring_begin(rq, (w->count * 2 + 2));
+		if (IS_ERR(cs))
+			return PTR_ERR(cs);
 
-	cs = intel_ring_begin(rq, (w->count * 2 + 2));
-	if (IS_ERR(cs))
-		return PTR_ERR(cs);
+		*cs++ = MI_LOAD_REGISTER_IMM(w->count);
+		for (i = 0; i < w->count; i++) {
+			*cs++ = i915_mmio_reg_offset(w->reg[i].addr);
+			*cs++ = w->reg[i].value;
+		}
+		*cs++ = MI_NOOP;
 
-	*cs++ = MI_LOAD_REGISTER_IMM(w->count);
-	for (i = 0; i < w->count; i++) {
-		*cs++ = i915_mmio_reg_offset(w->reg[i].addr);
-		*cs++ = w->reg[i].value;
+		intel_ring_advance(rq, cs);
 	}
-	*cs++ = MI_NOOP;
 
-	intel_ring_advance(rq, cs);
-
-	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
-	if (ret)
-		return ret;
-
-	return 0;
+	return whitelist_emit(rq, whitelist_apply(rq->engine, &wa));
 }
 
 static void bdw_gt_workarounds_apply(struct drm_i915_private *dev_priv)
@@ -854,3 +810,7 @@ void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
 	else
 		MISSING_CASE(INTEL_GEN(dev_priv));
 }
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/intel_workarounds.c"
+#endif
diff --git a/drivers/gpu/drm/i915/intel_workarounds.h b/drivers/gpu/drm/i915/intel_workarounds.h
index d9b0cc5afb4a..90503ad0c226 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.h
+++ b/drivers/gpu/drm/i915/intel_workarounds.h
@@ -12,6 +12,4 @@ int intel_ctx_workarounds_emit(struct i915_request *rq);
 
 void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv);
 
-int intel_whitelist_workarounds_apply(struct intel_engine_cs *engine);
-
 #endif
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 8bf6aa573226..a00e2bd08bce 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -11,6 +11,7 @@
  */
 selftest(sanitycheck, i915_live_sanitycheck) /* keep first (igt selfcheck) */
 selftest(uncore, intel_uncore_live_selftests)
+selftest(workarounds, intel_workarounds_live_selftests)
 selftest(requests, i915_request_live_selftests)
 selftest(objects, i915_gem_object_live_selftests)
 selftest(dmabuf, i915_gem_dmabuf_live_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
new file mode 100644
index 000000000000..111e3886b980
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -0,0 +1,247 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "../i915_selftest.h"
+
+#include "mock_context.h"
+
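+/*
+ * Read the RING_FORCE_TO_NONPRIV slots back into a scratch page, using
+ * SRM from within the target context so that we sample the registers
+ * as the context sees them rather than through privileged mmio.
+ */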
+static struct drm_i915_gem_object *
+read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
+{
+	struct drm_i915_gem_object *result;
+	struct i915_address_space *vm = &ctx->ppgtt->base;
+	struct i915_request *rq;
+	struct i915_vma *vma;
+	const u32 base = engine->mmio_base;
+	u32 *cs;
+	int err;
+	int i;
+
+	result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
+	if (IS_ERR(result))
+		return result;
+
+	vma = i915_vma_instance(result, vm, NULL);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto err_obj;
+	}
+
+	err = i915_vma_pin(vma, 0, 0, PIN_USER);
+	if (err)
+		goto err_obj;
+
+	rq = i915_request_alloc(engine, ctx);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto err_pin;
+	}
+
+	cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
+	if (IS_ERR(cs)) {
+		err = PTR_ERR(cs);
+		goto err_req;
+	}
+
+	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
+		u64 addr = vma->node.start + sizeof(u32) * i;
+
+		*cs++ = MI_STORE_REGISTER_MEM + (INTEL_GEN(ctx->i915) >= 8);
+		*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
+		*cs++ = lower_32_bits(addr);
+		*cs++ = upper_32_bits(addr); /* will be MI_NOOP for gen < 8 */
+	}
+	intel_ring_advance(rq, cs);
+
+	i915_request_add(rq);
+	i915_vma_unpin(vma);
+
+	return result;
+
+err_req:
+	i915_request_add(rq);
+err_pin:
+	i915_vma_unpin(vma);
+err_obj:
+	i915_gem_object_put(result);
+	return ERR_PTR(err);
+}
+
+static void print_results(const struct whitelist *wa, const u32 *results)
+{
+	int i;
+
+	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
+		u32 expected =
+			i >= wa->count ? 0 : i915_mmio_reg_offset(wa->reg[i]);
+		u32 actual = results[i];
+
+		pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
+			i, expected, actual);
+	}
+}
+
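+/*
+ * Compare the NONPRIV slots sampled through the context against the
+ * whitelist expected of the default context image.
+ */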
+static int check_whitelist(const struct whitelist *wa,
+			   struct i915_gem_context *ctx,
+			   struct intel_engine_cs *engine)
+{
+	struct drm_i915_gem_object *results;
+	u32 *vaddr;
+	int err;
+	int i;
+
+	results = read_nonprivs(ctx, engine);
+	if (IS_ERR(results))
+		return PTR_ERR(results);
+
+	err = i915_gem_object_set_to_cpu_domain(results, false);
+	if (err)
+		goto out_put;
+
+	vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
+	if (IS_ERR(vaddr)) {
+		err = PTR_ERR(vaddr);
+		goto out_put;
+	}
+
+	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
+		u32 expected =
+			i >= wa->count ? 0 : i915_mmio_reg_offset(wa->reg[i]);
+		u32 actual = vaddr[i];
+
+		if (expected != actual) {
+			print_results(wa, vaddr);
+			pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
+			       i, expected, actual);
+
+			err = -EINVAL;
+			break;
+		}
+	}
+
+	i915_gem_object_unpin_map(results);
+out_put:
+	i915_gem_object_put(results);
+	return err;
+}
+
+static int do_device_reset(struct intel_engine_cs *engine)
+{
+	i915_reset(engine->i915, ENGINE_MASK(engine->id), NULL);
+	return 0;
+}
+
+static int do_engine_reset(struct intel_engine_cs *engine)
+{
+	return i915_reset_engine(engine, NULL);
+}
+
+static int check_whitelist_across_reset(struct intel_engine_cs *engine,
+					int (*reset)(struct intel_engine_cs *),
+					const struct whitelist *wa,
+					const char *name)
+{
+	struct i915_gem_context *ctx;
+	int err;
+
+	ctx = kernel_context(engine->i915);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	err = check_whitelist(wa, ctx, engine);
+	if (err) {
+		pr_err("Invalid whitelist *before* %s reset!\n", name);
+		kernel_context_close(ctx);
+		return err;
+	}
+
+	err = reset(engine);
+	if (err) {
+		pr_err("%s reset failed\n", name);
+		goto out;
+	}
+
+	err = check_whitelist(wa, ctx, engine);
+	if (err) {
+		pr_err("Whitelist not preserved in context across %s reset!\n",
+		       name);
+		goto out;
+	}
+
+	kernel_context_close(ctx);
+
+	ctx = kernel_context(engine->i915);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	err = check_whitelist(wa, ctx, engine);
+	if (err) {
+		pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
+		       name);
+		goto out;
+	}
+
+out:
+	kernel_context_close(ctx);
+	return err;
+}
+
+static int live_reset_whitelist(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct intel_engine_cs *engine = i915->engine[RCS];
+	struct whitelist wa;
+	int err;
+
+	/* If we reset the gpu, we should not lose the RING_NONPRIV */
+
+	if (!engine)
+		return 0;
+
+	whitelist_apply(engine, &wa);
+	pr_info("Checking %d whitelisted registers (RING_NONPRIV)\n", wa.count);
+
+	if (intel_has_reset_engine(i915)) {
+		err = check_whitelist_across_reset(engine,
+						   do_engine_reset, &wa,
+						   "engine");
+		if (err)
+			return err;
+	}
+
+	if (intel_has_gpu_reset(i915)) {
+		err = check_whitelist_across_reset(engine,
+						   do_device_reset, &wa,
+						   "device");
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+int intel_workarounds_live_selftests(struct drm_i915_private *i915)
+{
+	static const struct i915_subtest tests[] = {
+		SUBTEST(live_reset_whitelist),
+	};
+	int err;
+
+	mutex_lock(&i915->drm.struct_mutex);
+	err = i915_subtests(tests, i915);
+	mutex_unlock(&i915->drm.struct_mutex);
+
+	return err;
+}
-- 
2.17.0