[Intel-gfx] [PATCH] drm/i915/selftests: Teach workarounds to take intel_gt as its argument
Tvrtko Ursulin
tvrtko.ursulin at linux.intel.com
Wed Oct 16 12:41:33 UTC 2019
On 16/10/2019 12:49, Chris Wilson wrote:
> The workarounds selftests are hardware-centric and so want to use the gt
> as their target.
>
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
> ---
> .../gpu/drm/i915/gt/selftest_workarounds.c | 128 +++++++++---------
> 1 file changed, 65 insertions(+), 63 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
> index dc11f7ad50a2..8856c6c46cc4 100644
> --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
> +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
> @@ -58,7 +58,7 @@ static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
> }
>
> static void
> -reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
> +reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
> {
> struct intel_engine_cs *engine;
> enum intel_engine_id id;
> @@ -66,10 +66,10 @@ reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
> memset(lists, 0, sizeof(*lists));
>
> wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
> - gt_init_workarounds(i915, &lists->gt_wa_list);
> + gt_init_workarounds(gt->i915, &lists->gt_wa_list);
> wa_init_finish(&lists->gt_wa_list);
>
> - for_each_engine(engine, i915, id) {
> + for_each_engine(engine, gt->i915, id) {
^^^
> struct i915_wa_list *wal = &lists->engine[id].wa_list;
>
> wa_init_start(wal, "REF", engine->name);
> @@ -83,12 +83,12 @@ reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
> }
>
> static void
> -reference_lists_fini(struct drm_i915_private *i915, struct wa_lists *lists)
> +reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
> {
> struct intel_engine_cs *engine;
> enum intel_engine_id id;
>
> - for_each_engine(engine, i915, id)
> + for_each_engine(engine, gt->i915, id)
> intel_wa_list_free(&lists->engine[id].wa_list);
>
> intel_wa_list_free(&lists->gt_wa_list);
> @@ -215,10 +215,10 @@ static int check_whitelist(struct i915_gem_context *ctx,
>
> err = 0;
> i915_gem_object_lock(results);
> - intel_wedge_on_timeout(&wedge, &ctx->i915->gt, HZ / 5) /* safety net! */
> + intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */
> err = i915_gem_object_set_to_cpu_domain(results, false);
> i915_gem_object_unlock(results);
> - if (intel_gt_is_wedged(&ctx->i915->gt))
> + if (intel_gt_is_wedged(engine->gt))
> err = -EIO;
> if (err)
> goto out_put;
> @@ -605,7 +605,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
> if (err) {
> pr_err("%s: Futzing %x timedout; cancelling test\n",
> engine->name, reg);
> - intel_gt_set_wedged(&ctx->i915->gt);
> + intel_gt_set_wedged(engine->gt);
> goto out_batch;
> }
>
> @@ -704,7 +704,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
>
> static int live_dirty_whitelist(void *arg)
> {
> - struct drm_i915_private *i915 = arg;
> + struct intel_gt *gt = arg;
> struct intel_engine_cs *engine;
> struct i915_gem_context *ctx;
> enum intel_engine_id id;
> @@ -713,20 +713,20 @@ static int live_dirty_whitelist(void *arg)
>
> /* Can the user write to the whitelisted registers? */
>
> - if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */
> + if (INTEL_GEN(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
> return 0;
>
> - file = mock_file(i915);
> + file = mock_file(gt->i915);
> if (IS_ERR(file))
> return PTR_ERR(file);
>
> - ctx = live_context(i915, file);
> + ctx = live_context(gt->i915, file);
> if (IS_ERR(ctx)) {
> err = PTR_ERR(ctx);
> goto out_file;
> }
>
> - for_each_engine(engine, i915, id) {
> + for_each_engine(engine, gt->i915, id) {
^^^
> if (engine->whitelist.count == 0)
> continue;
>
> @@ -736,41 +736,43 @@ static int live_dirty_whitelist(void *arg)
> }
>
> out_file:
> - mock_file_free(i915, file);
> + mock_file_free(gt->i915, file);
> return err;
> }
>
> static int live_reset_whitelist(void *arg)
> {
> - struct drm_i915_private *i915 = arg;
> - struct intel_engine_cs *engine = i915->engine[RCS0];
> + struct intel_gt *gt = arg;
> + struct intel_engine_cs *engine;
> + enum intel_engine_id id;
> int err = 0;
>
> /* If we reset the gpu, we should not lose the RING_NONPRIV */
> + igt_global_reset_lock(gt);
>
> - if (!engine || engine->whitelist.count == 0)
> - return 0;
> -
> - igt_global_reset_lock(&i915->gt);
> + for_each_engine(engine, gt->i915, id) {
^^^
> + if (engine->whitelist.count == 0)
> + continue;
>
> - if (intel_has_reset_engine(&i915->gt)) {
> - err = check_whitelist_across_reset(engine,
> - do_engine_reset,
> - "engine");
> - if (err)
> - goto out;
> - }
> + if (intel_has_reset_engine(gt)) {
> + err = check_whitelist_across_reset(engine,
> + do_engine_reset,
> + "engine");
> + if (err)
> + goto out;
> + }
>
> - if (intel_has_gpu_reset(&i915->gt)) {
> - err = check_whitelist_across_reset(engine,
> - do_device_reset,
> - "device");
> - if (err)
> - goto out;
> + if (intel_has_gpu_reset(gt)) {
> + err = check_whitelist_across_reset(engine,
> + do_device_reset,
> + "device");
> + if (err)
> + goto out;
> + }
> }
>
> out:
> - igt_global_reset_unlock(&i915->gt);
> + igt_global_reset_unlock(gt);
> return err;
> }
>
> @@ -996,7 +998,7 @@ check_whitelisted_registers(struct intel_engine_cs *engine,
>
> static int live_isolated_whitelist(void *arg)
> {
> - struct drm_i915_private *i915 = arg;
> + struct intel_gt *gt = arg;
> struct {
> struct i915_gem_context *ctx;
> struct i915_vma *scratch[2];
> @@ -1010,17 +1012,14 @@ static int live_isolated_whitelist(void *arg)
> * invisible to a second context.
> */
>
> - if (!intel_engines_has_context_isolation(i915))
> - return 0;
> -
> - if (!i915->kernel_context->vm)
> + if (!intel_engines_has_context_isolation(gt->i915))
> return 0;
>
> for (i = 0; i < ARRAY_SIZE(client); i++) {
> struct i915_address_space *vm;
> struct i915_gem_context *c;
>
> - c = kernel_context(i915);
> + c = kernel_context(gt->i915);
> if (IS_ERR(c)) {
> err = PTR_ERR(c);
> goto err;
> @@ -1049,7 +1048,10 @@ static int live_isolated_whitelist(void *arg)
> i915_vm_put(vm);
> }
>
> - for_each_engine(engine, i915, id) {
> + for_each_engine(engine, gt->i915, id) {
^^^
> + if (!engine->kernel_context->vm)
> + continue;
> +
> if (!whitelist_writable_count(engine))
> continue;
>
> @@ -1103,7 +1105,7 @@ static int live_isolated_whitelist(void *arg)
> kernel_context_close(client[i].ctx);
> }
>
> - if (igt_flush_test(i915))
> + if (igt_flush_test(gt->i915))
> err = -EIO;
>
> return err;
> @@ -1138,16 +1140,16 @@ verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
> static int
> live_gpu_reset_workarounds(void *arg)
> {
> - struct drm_i915_private *i915 = arg;
> + struct intel_gt *gt = arg;
> struct i915_gem_context *ctx;
> intel_wakeref_t wakeref;
> struct wa_lists lists;
> bool ok;
>
> - if (!intel_has_gpu_reset(&i915->gt))
> + if (!intel_has_gpu_reset(gt))
> return 0;
>
> - ctx = kernel_context(i915);
> + ctx = kernel_context(gt->i915);
> if (IS_ERR(ctx))
> return PTR_ERR(ctx);
>
> @@ -1155,25 +1157,25 @@ live_gpu_reset_workarounds(void *arg)
>
> pr_info("Verifying after GPU reset...\n");
>
> - igt_global_reset_lock(&i915->gt);
> - wakeref = intel_runtime_pm_get(&i915->runtime_pm);
> + igt_global_reset_lock(gt);
> + wakeref = intel_runtime_pm_get(gt->uncore->rpm);
>
> - reference_lists_init(i915, &lists);
> + reference_lists_init(gt, &lists);
>
> ok = verify_wa_lists(ctx, &lists, "before reset");
> if (!ok)
> goto out;
>
> - intel_gt_reset(&i915->gt, ALL_ENGINES, "live_workarounds");
> + intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");
>
> ok = verify_wa_lists(ctx, &lists, "after reset");
>
> out:
> i915_gem_context_unlock_engines(ctx);
> kernel_context_close(ctx);
> - reference_lists_fini(i915, &lists);
> - intel_runtime_pm_put(&i915->runtime_pm, wakeref);
> - igt_global_reset_unlock(&i915->gt);
> + reference_lists_fini(gt, &lists);
> + intel_runtime_pm_put(gt->uncore->rpm, wakeref);
> + igt_global_reset_unlock(gt);
>
> return ok ? 0 : -ESRCH;
> }
> @@ -1181,7 +1183,7 @@ live_gpu_reset_workarounds(void *arg)
> static int
> live_engine_reset_workarounds(void *arg)
> {
> - struct drm_i915_private *i915 = arg;
> + struct intel_gt *gt = arg;
> struct i915_gem_engines_iter it;
> struct i915_gem_context *ctx;
> struct intel_context *ce;
> @@ -1191,17 +1193,17 @@ live_engine_reset_workarounds(void *arg)
> struct wa_lists lists;
> int ret = 0;
>
> - if (!intel_has_reset_engine(&i915->gt))
> + if (!intel_has_reset_engine(gt))
> return 0;
>
> - ctx = kernel_context(i915);
> + ctx = kernel_context(gt->i915);
> if (IS_ERR(ctx))
> return PTR_ERR(ctx);
>
> - igt_global_reset_lock(&i915->gt);
> - wakeref = intel_runtime_pm_get(&i915->runtime_pm);
> + igt_global_reset_lock(gt);
> + wakeref = intel_runtime_pm_get(gt->uncore->rpm);
>
> - reference_lists_init(i915, &lists);
> + reference_lists_init(gt, &lists);
>
> for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
> struct intel_engine_cs *engine = ce->engine;
> @@ -1254,12 +1256,12 @@ live_engine_reset_workarounds(void *arg)
> }
> err:
> i915_gem_context_unlock_engines(ctx);
> - reference_lists_fini(i915, &lists);
> - intel_runtime_pm_put(&i915->runtime_pm, wakeref);
> - igt_global_reset_unlock(&i915->gt);
> + reference_lists_fini(gt, &lists);
> + intel_runtime_pm_put(gt->uncore->rpm, wakeref);
> + igt_global_reset_unlock(gt);
> kernel_context_close(ctx);
>
> - igt_flush_test(i915);
> + igt_flush_test(gt->i915);
>
> return ret;
> }
> @@ -1277,5 +1279,5 @@ int intel_workarounds_live_selftests(struct drm_i915_private *i915)
> if (intel_gt_is_wedged(&i915->gt))
> return 0;
>
> - return i915_subtests(tests, i915);
> + return intel_gt_live_subtests(tests, &i915->gt);
> }
>
With gt instead of gt->i915 in for_each_engine:
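
That is, in each of the loops marked above, something along these lines
(just a rough sketch from my side, assuming for_each_engine is taught to
take the struct intel_gt directly instead of going via gt->i915; not part
of this patch):

	/* assuming a gt-based for_each_engine(engine, gt, id) variant */
	-	for_each_engine(engine, gt->i915, id) {
	+	for_each_engine(engine, gt, id) {
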
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
Regards,
Tvrtko