[Intel-gfx] [PATCH 7/7] drm/i915/selftests: Context SSEU reconfiguration tests

Chris Wilson chris at chris-wilson.co.uk
Tue Jan 8 12:50:09 UTC 2019


Quoting Tvrtko Ursulin (2019-01-08 11:22:52)
> From: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
> 
> Exercise the context image reconfiguration logic for idle and busy
> contexts, with the resets thrown into the mix as well.
> 
> Free from the uAPI restrictions this test runs on all Gen9+ platforms
> with slice power gating.
> 
> v2:
>  * Rename some helpers for clarity.
>  * Include subtest names in error logs.
>  * Remove unnecessary function export.
> 
> v3:
>  * Rebase for RUNTIME_INFO.
> 
> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>

I can split a few hairs, but the selftest looks solid, and I trust that
after a few rounds with the hw the expectations will prove correct.
Reviewed-by: Chris Wilson <chris at chris-wilson.co.uk>

> +static struct i915_vma *rpcs_query_batch(struct i915_vma *vma)
> +{
> +       struct drm_i915_gem_object *obj;
> +       u32 *cmd;
> +       int err;

This being the kernel, you could just emit the commands into the ring
and forgo another bo for the batch :)

> +
> +       if (INTEL_GEN(vma->vm->i915) < 8)
> +               return ERR_PTR(-EINVAL);
> +
> +       obj = i915_gem_object_create_internal(vma->vm->i915, PAGE_SIZE);
> +       if (IS_ERR(obj))
> +               return ERR_CAST(obj);
> +
> +       cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
> +       if (IS_ERR(cmd)) {
> +               err = PTR_ERR(cmd);
> +               goto err;
> +       }
> +
> +       *cmd++ = MI_STORE_REGISTER_MEM_GEN8;
> +       *cmd++ = i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE);
> +       *cmd++ = lower_32_bits(vma->node.start);
> +       *cmd++ = upper_32_bits(vma->node.start);
> +       *cmd = MI_BATCH_BUFFER_END;
> +
> +       i915_gem_object_unpin_map(obj);
> +
> +       err = i915_gem_object_set_to_gtt_domain(obj, false);
> +       if (err)
> +               goto err;
> +
> +       vma = i915_vma_instance(obj, vma->vm, NULL);
> +       if (IS_ERR(vma)) {
> +               err = PTR_ERR(vma);
> +               goto err;
> +       }
> +
> +       err = i915_vma_pin(vma, 0, 0, PIN_USER);
> +       if (err)
> +               goto err;
> +
> +       return vma;
> +
> +err:
> +       i915_gem_object_put(obj);
> +       return ERR_PTR(err);
> +}
> +
> +static int
> +emit_rpcs_query(struct drm_i915_gem_object *obj,
> +               struct i915_gem_context *ctx,
> +               struct intel_engine_cs *engine,
> +               struct i915_request **rq_out)
> +{
> +       struct i915_address_space *vm;
> +       struct i915_request *rq;
> +       struct i915_vma *batch;
> +       struct i915_vma *vma;
> +       int err;
> +
> +       GEM_BUG_ON(!ctx->ppgtt);
> +       GEM_BUG_ON(!intel_engine_can_store_dword(engine));
> +
> +       vm = &ctx->ppgtt->vm;
> +
> +       vma = i915_vma_instance(obj, vm, NULL);
> +       if (IS_ERR(vma))
> +               return PTR_ERR(vma);
> +
> +       err = i915_gem_object_set_to_gtt_domain(obj, false);
> +       if (err)
> +               return err;
> +
> +       err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER);
> +       if (err)
> +               return err;
> +
> +       batch = rpcs_query_batch(vma);
> +       if (IS_ERR(batch)) {
> +               err = PTR_ERR(batch);
> +               goto err_vma;
> +       }
> +
> +       rq = i915_request_alloc(engine, ctx);
> +       if (IS_ERR(rq)) {
> +               err = PTR_ERR(rq);
> +               goto err_batch;
> +       }
> +
> +       err = engine->emit_bb_start(rq, batch->node.start, batch->node.size, 0);
> +       if (err)
> +               goto err_request;
> +
> +       err = i915_vma_move_to_active(batch, rq, 0);
> +       if (err)
> +               goto skip_request;
> +
> +       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
> +       if (err)
> +               goto skip_request;
> +
> +       i915_gem_object_set_active_reference(batch->obj);
> +       i915_vma_unpin(batch);
> +       i915_vma_close(batch);
> +
> +       i915_vma_unpin(vma);
> +
> +       *rq_out = i915_request_get(rq);
> +
> +       i915_request_add(rq);
> +
> +       return 0;
> +
> +skip_request:
> +       i915_request_skip(rq, err);
> +err_request:
> +       i915_request_add(rq);
> +err_batch:
> +       i915_vma_unpin(batch);
> +err_vma:
> +       i915_vma_unpin(vma);
> +
> +       return err;
> +}
> +
> +#define TEST_IDLE      (1 << 0)
> +#define TEST_BUSY      (1 << 1)
> +#define TEST_RESET     (1 << 2)
> +
> +int
> +__i915_gem_context_reconfigure_sseu(struct i915_gem_context *ctx,
> +                                   struct intel_engine_cs *engine,
> +                                   struct intel_sseu sseu);

Is this not part of i915_gem_context.c, as is this selftest? So just use
a static.

> +static int
> +__sseu_prepare(struct drm_i915_private *i915,
> +              const char *name,
> +              unsigned int flags,
> +              struct i915_gem_context *ctx,
> +              struct intel_engine_cs *engine,
> +              struct igt_spinner **spin_out)
> +{
> +       int ret = 0;
> +
> +       if (flags & (TEST_BUSY | TEST_RESET)) {
> +               struct igt_spinner *spin;
> +               struct i915_request *rq;
> +
> +               spin = kzalloc(sizeof(*spin), GFP_KERNEL);
> +               if (!spin) {
> +                       ret = -ENOMEM;
> +                       goto out;
> +               }
> +
> +               ret = igt_spinner_init(spin, i915);
> +               if (ret)
> +                       return ret;
> +
> +               rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);
> +               if (IS_ERR(rq)) {
> +                       ret = PTR_ERR(rq);
> +                       igt_spinner_fini(spin);
> +                       kfree(spin);
> +                       goto out;
> +               }
> +
> +               i915_request_add(rq);
> +
> +               if (!igt_wait_for_spinner(spin, rq)) {
> +                       pr_err("%s: Spinner failed to start!\n", name);
> +                       igt_spinner_end(spin);
> +                       igt_spinner_fini(spin);
> +                       kfree(spin);
> +                       ret = -ETIMEDOUT;
> +                       goto out;
> +               }
> +
> +               *spin_out = spin;
> +       }
> +
> +out:
> +       return ret;
> +}
> +
> +static int
> +__read_slice_count(struct drm_i915_private *i915,
> +                  struct i915_gem_context *ctx,
> +                  struct intel_engine_cs *engine,
> +                  struct drm_i915_gem_object *obj,
> +                  struct igt_spinner *spin,
> +                  u32 *rpcs)
> +{
> +       struct i915_request *rq = NULL;
> +       u32 s_mask, s_shift;
> +       unsigned int cnt;
> +       u32 *buf, val;
> +       long ret;
> +
> +       ret = emit_rpcs_query(obj, ctx, engine, &rq);
> +       if (ret)
> +               return ret;
> +
> +       if (spin)
> +               igt_spinner_end(spin);
> +
> +       ret = i915_request_wait(rq, I915_WAIT_LOCKED, MAX_SCHEDULE_TIMEOUT);
> +       if (ret <= 0)
> +               return ret < 0 ? ret : -ETIME;
> +       i915_request_put(rq);
> +
> +       buf = i915_gem_object_pin_map(obj, I915_MAP_WB);

This is i915_gem_obj_prepare_shmem_read() (bad name).

> +       if (IS_ERR(buf)) {
> +               ret = PTR_ERR(buf);
> +               return ret;
> +       }
> +
> +       if (INTEL_GEN(i915) >= 11) {
> +               s_mask = GEN11_RPCS_S_CNT_MASK;
> +               s_shift = GEN11_RPCS_S_CNT_SHIFT;
> +       } else {
> +               s_mask = GEN8_RPCS_S_CNT_MASK;
> +               s_shift = GEN8_RPCS_S_CNT_SHIFT;
> +       }
> +
> +       val = *buf;
> +       cnt = (val & s_mask) >> s_shift;
> +       *rpcs = val;
> +
> +       i915_gem_object_unpin_map(obj);
> +
> +       return cnt;
> +}
-Chris


More information about the Intel-gfx mailing list