[Intel-gfx] [PATCH 06/38] drm/i915/selftests: Check that whitelisted registers are accessible
Michał Winiarski
michal.winiarski at intel.com
Fri Mar 1 15:18:58 UTC 2019
On Fri, Mar 01, 2019 at 02:03:32PM +0000, Chris Wilson wrote:
> There is no point in whitelisting a register that the user then cannot
> write to, so check the register exists before merging such patches.
>
> v2: Mark SLICE_COMMON_ECO_CHICKEN1 [731c] as write-only
>
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> Cc: Dale B Stimson <dale.b.stimson at intel.com>
> Cc: Michał Winiarski <michal.winiarski at intel.com>
> ---
> .../drm/i915/selftests/intel_workarounds.c | 376 ++++++++++++++++++
> 1 file changed, 376 insertions(+)
>
> diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
> index e6ffc8ac22dc..33b3ced83fde 100644
> --- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
> +++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
> @@ -12,6 +12,14 @@
> #include "igt_spinner.h"
> #include "igt_wedge_me.h"
> #include "mock_context.h"
> +#include "mock_drm.h"
> +
> +static const struct wo_register {
> + enum intel_platform platform;
> + u32 reg;
> +} wo_registers[] = {
> + { INTEL_GEMINILAKE, 0x731c }
> +};
>
> #define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 4)
> struct wa_lists {
> @@ -331,6 +339,373 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
> return err;
> }
>
> +static struct i915_vma *create_scratch(struct i915_gem_context *ctx)
> +{
> + struct drm_i915_gem_object *obj;
> + struct i915_vma *vma;
> + void *ptr;
> + int err;
> +
> + obj = i915_gem_object_create_internal(ctx->i915, PAGE_SIZE);
> + if (IS_ERR(obj))
> + return ERR_CAST(obj);
> +
> + i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
The return value should be checked here - i915_gem_object_set_cache_level() can fail.
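Something along these lines, perhaps (untested sketch, reusing the existing
err_obj path):

	err = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
	if (err)
		goto err_obj; /* drops obj and returns ERR_PTR(err) */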
> +
> + ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
> + if (IS_ERR(ptr)) {
> + err = PTR_ERR(ptr);
> + goto err_obj;
> + }
> + memset(ptr, 0xc5, PAGE_SIZE);
> + i915_gem_object_unpin_map(obj);
> +
> + vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
> + if (IS_ERR(vma)) {
> + err = PTR_ERR(vma);
> + goto err_obj;
> + }
> +
> + err = i915_vma_pin(vma, 0, 0, PIN_USER);
> + if (err)
> + goto err_obj;
> +
> + err = i915_gem_object_set_to_cpu_domain(obj, false);
> + if (err)
> + goto err_obj;
> +
> + return vma;
> +
> +err_obj:
> + i915_gem_object_put(obj);
> + return ERR_PTR(err);
> +}
> +
[SNIP]
> +static int check_dirty_whitelist(struct i915_gem_context *ctx,
> + struct intel_engine_cs *engine)
> +{
> + const u32 values[] = {
> + 0x00000000,
> + 0x01010101,
> + 0x10100101,
> + 0x03030303,
> + 0x30300303,
> + 0x05050505,
> + 0x50500505,
> + 0x0f0f0f0f,
> + 0xf00ff00f,
> + 0x10101010,
> + 0xf0f01010,
> + 0x30303030,
> + 0xa0a03030,
> + 0x50505050,
> + 0xc0c05050,
> + 0xf0f0f0f0,
> + 0x11111111,
> + 0x33333333,
> + 0x55555555,
> + 0x0000ffff,
> + 0x00ff00ff,
> + 0xff0000ff,
> + 0xffff00ff,
> + 0xffffffff,
> + };
> + struct i915_vma *scratch;
> + struct i915_vma *batch;
> + int err = 0, i, v;
> + u32 *cs;
> +
> + scratch = create_scratch(ctx);
> + if (IS_ERR(scratch))
> + return PTR_ERR(scratch);
> +
> + batch = create_batch(ctx);
> + if (IS_ERR(batch)) {
> + err = PTR_ERR(batch);
> + goto out_scratch;
> + }
> +
> + for (i = 0; i < engine->whitelist.count; i++) {
> + u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
> + u64 addr = scratch->node.start;
> + struct i915_request *rq;
> + u32 srm, lrm, rsvd;
> + u32 expect;
> + int idx;
> +
> + if (wo_register(engine, reg))
> + continue;
> +
> + srm = MI_STORE_REGISTER_MEM;
> + lrm = MI_LOAD_REGISTER_MEM;
> + if (INTEL_GEN(ctx->i915) >= 8)
> + lrm++, srm++;
> +
> + pr_debug("%s: Writing garbage to %x {srm=0x%08x, lrm=0x%08x}\n",
> + engine->name, reg, srm, lrm);
Why are we printing opcodes (srm/lrm)?
> +
> + cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
> + if (IS_ERR(cs)) {
> + err = PTR_ERR(cs);
> + goto out_batch;
> + }
> +
> + /* SRM original */
> + *cs++ = srm;
> + *cs++ = reg;
> + *cs++ = lower_32_bits(addr);
> + *cs++ = upper_32_bits(addr);
> +
> + idx = 1;
> + for (v = 0; v < ARRAY_SIZE(values); v++) {
> + /* LRI garbage */
> + *cs++ = MI_LOAD_REGISTER_IMM(1);
> + *cs++ = reg;
> + *cs++ = values[v];
> +
> + /* SRM result */
> + *cs++ = srm;
> + *cs++ = reg;
> + *cs++ = lower_32_bits(addr + sizeof(u32) * idx);
> + *cs++ = upper_32_bits(addr + sizeof(u32) * idx);
> + idx++;
> + }
> + for (v = 0; v < ARRAY_SIZE(values); v++) {
> + /* LRI garbage */
> + *cs++ = MI_LOAD_REGISTER_IMM(1);
> + *cs++ = reg;
> + *cs++ = ~values[v];
> +
> + /* SRM result */
> + *cs++ = srm;
> + *cs++ = reg;
> + *cs++ = lower_32_bits(addr + sizeof(u32) * idx);
> + *cs++ = upper_32_bits(addr + sizeof(u32) * idx);
> + idx++;
> + }
> + GEM_BUG_ON(idx * sizeof(u32) > scratch->size);
> +
> + /* LRM original -- don't leave garbage in the context! */
> + *cs++ = lrm;
> + *cs++ = reg;
> + *cs++ = lower_32_bits(addr);
> + *cs++ = upper_32_bits(addr);
> +
> + *cs++ = MI_BATCH_BUFFER_END;
> +
> + i915_gem_object_unpin_map(batch->obj);
> + i915_gem_chipset_flush(ctx->i915);
> +
> + rq = i915_request_alloc(engine, ctx);
> + if (IS_ERR(rq)) {
> + err = PTR_ERR(rq);
> + goto out_batch;
> + }
> +
> + if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
> + err = engine->emit_init_breadcrumb(rq);
> + if (err)
> + goto err_request;
> + }
> +
> + err = engine->emit_bb_start(rq,
> + batch->node.start, PAGE_SIZE,
> + 0);
> + if (err)
> + goto err_request;
> +
> +err_request:
> + i915_request_add(rq);
> + if (err)
> + goto out_batch;
> +
> + if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
> + pr_err("%s: Futzing %x timedout; cancelling test\n",
> + engine->name, reg);
> + i915_gem_set_wedged(ctx->i915);
> + err = -EIO;
> + goto out_batch;
> + }
> +
> + cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
> + if (IS_ERR(cs)) {
> + err = PTR_ERR(cs);
> + goto out_batch;
> + }
We're already using cs for the batch mapping above - please use a separate pointer for the scratch readback.
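Something like this, perhaps (untested; "results" is just a name I made up):

	u32 *results;

	results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
	if (IS_ERR(results)) {
		err = PTR_ERR(results);
		goto out_batch;
	}

	GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
	rsvd = results[ARRAY_SIZE(values)]; /* detect write masking */

with the rest of the readback then indexing results[] instead of cs[].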
> +
> + GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
> + rsvd = cs[ARRAY_SIZE(values)]; /* detect write masking */
So we're writing 0xffffffff and using the readback as the mask of writable
bits. There's a comment saying so, and the GEM_BUG_ON above will catch anyone
who changes the last entry of values[].
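For anyone following along: rsvd is whatever sticks after writing all-ones,
i.e. the mask of writable bits. A simplified sketch of how I read the
expected-value computation - emulate_write() is just my name for it, and the
snipped reg_write() may well handle more cases (e.g. masked registers):

	/*
	 * Sketch only: fold a write into the expected value through the
	 * mask of bits that stuck when 0xffffffff was written.
	 */
	static u32 emulate_write(u32 old, u32 new, u32 writable)
	{
		return (old & ~writable) | (new & writable);
	}

E.g. a register implementing only its low 16 bits reads back
rsvd == 0x0000ffff, and each garbage write is folded into the expected value
through that mask.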
Reviewed-by: Michał Winiarski <michal.winiarski at intel.com>
> + if (!rsvd) {
> + pr_err("%s: Unable to write to whitelisted register %x\n",
> + engine->name, reg);
> + err = -EINVAL;
> + goto out_unpin;
> + }
> +
> + expect = cs[0];
> + idx = 1;
> + for (v = 0; v < ARRAY_SIZE(values); v++) {
> + expect = reg_write(expect, values[v], rsvd);
> + if (cs[idx] != expect)
> + err++;
> + idx++;
> + }
> + for (v = 0; v < ARRAY_SIZE(values); v++) {
> + expect = reg_write(expect, ~values[v], rsvd);
> + if (cs[idx] != expect)
> + err++;
> + idx++;
> + }
> + if (err) {
> + pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n",
> + engine->name, err, reg);
> +
> + pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
> + engine->name, reg, cs[0], rsvd);
> +
> + expect = cs[0];
> + idx = 1;
> + for (v = 0; v < ARRAY_SIZE(values); v++) {
> + u32 w = values[v];
> +
> + expect = reg_write(expect, w, rsvd);
> + pr_info("Wrote %08x, read %08x, expect %08x\n",
> + w, cs[idx], expect);
> + idx++;
> + }
> + for (v = 0; v < ARRAY_SIZE(values); v++) {
> + u32 w = ~values[v];
> +
> + expect = reg_write(expect, w, rsvd);
> + pr_info("Wrote %08x, read %08x, expect %08x\n",
> + w, cs[idx], expect);
> + idx++;
> + }
> +
> + err = -EINVAL;
> + }
> +out_unpin:
> + i915_gem_object_unpin_map(scratch->obj);
> + if (err)
> + break;
> + }
> +
> + if (igt_flush_test(ctx->i915, I915_WAIT_LOCKED))
> + err = -EIO;
> +out_batch:
> + i915_vma_unpin_and_release(&batch, 0);
> +out_scratch:
> + i915_vma_unpin_and_release(&scratch, 0);
> + return err;
> +}
> +
> +static int live_dirty_whitelist(void *arg)
> +{
> + struct drm_i915_private *i915 = arg;
> + struct intel_engine_cs *engine;
> + struct i915_gem_context *ctx;
> + enum intel_engine_id id;
> + intel_wakeref_t wakeref;
> + struct drm_file *file;
> + int err = 0;
> +
> + /* Can the user write to the whitelisted registers? */
> +
> + if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */
> + return 0;
> +
> + wakeref = intel_runtime_pm_get(i915);
> +
> + mutex_unlock(&i915->drm.struct_mutex);
> + file = mock_file(i915);
> + mutex_lock(&i915->drm.struct_mutex);
> + if (IS_ERR(file)) {
> + err = PTR_ERR(file);
> + goto out_rpm;
> + }
> +
> + ctx = live_context(i915, file);
> + if (IS_ERR(ctx)) {
> + err = PTR_ERR(ctx);
> + goto out_file;
> + }
> +
> + for_each_engine(engine, i915, id) {
> + if (engine->whitelist.count == 0)
> + continue;
> +
> + err = check_dirty_whitelist(ctx, engine);
> + if (err)
> + goto out_file;
> + }
> +
> +out_file:
> + mutex_unlock(&i915->drm.struct_mutex);
> + mock_file_free(i915, file);
> + mutex_lock(&i915->drm.struct_mutex);
> +out_rpm:
> + intel_runtime_pm_put(i915, wakeref);
> + return err;
> +}
> +
> static int live_reset_whitelist(void *arg)
> {
> struct drm_i915_private *i915 = arg;
> @@ -504,6 +879,7 @@ live_engine_reset_gt_engine_workarounds(void *arg)
> int intel_workarounds_live_selftests(struct drm_i915_private *i915)
> {
> static const struct i915_subtest tests[] = {
> + SUBTEST(live_dirty_whitelist),
> SUBTEST(live_reset_whitelist),
> SUBTEST(live_gpu_reset_gt_engine_workarounds),
> SUBTEST(live_engine_reset_gt_engine_workarounds),
> --
> 2.20.1
>