[Intel-gfx] [PATCH 3/6] drm/i915/guc: Add selftest for a hung GuC
John Harrison
john.c.harrison at intel.com
Thu Jul 28 18:21:08 UTC 2022
On 7/27/2022 19:42, John.C.Harrison at Intel.com wrote:
> From: Rahul Kumar Singh <rahul.kumar.singh at intel.com>
>
> Add a test to check that the hangcheck will recover from a submission
> hang in the GuC.
>
> Signed-off-by: Rahul Kumar Singh <rahul.kumar.singh at intel.com>
> Signed-off-by: John Harrison <John.C.Harrison at Intel.com>
> ---
> .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 1 +
> .../drm/i915/gt/uc/selftest_guc_hangcheck.c | 159 ++++++++++++++++++
> .../drm/i915/selftests/i915_live_selftests.h | 1 +
> 3 files changed, 161 insertions(+)
> create mode 100644 drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
>
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> index 0b8c6450fa344..ff205c4125857 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> @@ -5177,4 +5177,5 @@ bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve)
> #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
> #include "selftest_guc.c"
> #include "selftest_guc_multi_lrc.c"
> +#include "selftest_guc_hangcheck.c"
> #endif
> diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
> new file mode 100644
> index 0000000000000..af913c4b09d37
> --- /dev/null
> +++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
> @@ -0,0 +1,159 @@
> +// SPDX-License-Identifier: MIT
> +/*
> + * Copyright © 2019 Intel Corporation
Need to update the date.
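Presumably just a case of bumping the year in the new file's header, i.e. something like (illustrative only):

     * Copyright © 2022 Intel Corporation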
> + */
> +
> +#include "selftests/igt_spinner.h"
> +#include "selftests/igt_reset.h"
> +#include "selftests/intel_scheduler_helpers.h"
> +#include "gt/intel_engine_heartbeat.h"
> +#include "gem/selftests/mock_context.h"
> +
> +#define BEAT_INTERVAL 100
> +
> +static struct i915_request *nop_request(struct intel_engine_cs *engine)
> +{
> + struct i915_request *rq;
> +
> + rq = intel_engine_create_kernel_request(engine);
> + if (IS_ERR(rq))
> + return rq;
> +
> + i915_request_get(rq);
> + i915_request_add(rq);
> +
> + return rq;
> +}
> +
> +static int intel_hang_guc(void *arg)
> +{
> + struct intel_gt *gt = arg;
> + int ret = 0;
> + struct i915_gem_context *ctx;
> + struct intel_context *ce;
> + struct igt_spinner spin;
> + struct i915_request *rq;
> + intel_wakeref_t wakeref;
> + struct i915_gpu_error *global = &gt->i915->gpu_error;
> + struct intel_engine_cs *engine;
> + unsigned int reset_count;
> + u32 guc_status;
> + u32 old_beat;
> +
> + ctx = kernel_context(gt->i915, NULL);
> + if (IS_ERR(ctx)) {
> + pr_err("Failed get kernel context: %ld\n", PTR_ERR(ctx));
Should not use pr_err when drm_err is available.
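I.e. something along these lines (sketch only, not compile-tested), and likewise for the other pr_err() calls in this file:

    drm_err(&gt->i915->drm, "Failed to get kernel context: %ld\n", PTR_ERR(ctx));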
John.
> + return PTR_ERR(ctx);
> + }
> +
> + wakeref = intel_runtime_pm_get(gt->uncore->rpm);
> +
> + ce = intel_context_create(gt->engine[BCS0]);
> + if (IS_ERR(ce)) {
> + ret = PTR_ERR(ce);
> + pr_err("Failed to create spinner request: %d\n", ret);
> + goto err;
> + }
> +
> + engine = ce->engine;
> + reset_count = i915_reset_count(global);
> +
> + old_beat = engine->props.heartbeat_interval_ms;
> + ret = intel_engine_set_heartbeat(engine, BEAT_INTERVAL);
> + if (ret) {
> + pr_err("Failed to boost heatbeat interval: %d\n", ret);
> + goto err;
> + }
> +
> + ret = igt_spinner_init(&spin, engine->gt);
> + if (ret) {
> + pr_err("Failed to create spinner: %d\n", ret);
> + goto err;
> + }
> +
> + rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
> + intel_context_put(ce);
> + if (IS_ERR(rq)) {
> + ret = PTR_ERR(rq);
> + pr_err("Failed to create spinner request: %d\n", ret);
> + goto err_spin;
> + }
> +
> + ret = request_add_spin(rq, &spin);
> + if (ret) {
> + i915_request_put(rq);
> + pr_err("Failed to add Spinner request: %d\n", ret);
> + goto err_spin;
> + }
> +
> + ret = intel_reset_guc(gt);
> + if (ret) {
> + i915_request_put(rq);
> + pr_err("Failed to reset GuC, ret = %d\n", ret);
> + goto err_spin;
> + }
> +
> + guc_status = intel_uncore_read(gt->uncore, GUC_STATUS);
> + if (!(guc_status & GS_MIA_IN_RESET)) {
> + i915_request_put(rq);
> + pr_err("GuC failed to reset: status = 0x%08X\n", guc_status);
> + ret = -EIO;
> + goto err_spin;
> + }
> +
> + /* Wait for the heartbeat to cause a reset */
> + ret = intel_selftest_wait_for_rq(rq);
> + i915_request_put(rq);
> + if (ret) {
> + pr_err("Request failed to complete: %d\n", ret);
> + goto err_spin;
> + }
> +
> + if (i915_reset_count(global) == reset_count) {
> + pr_err("Failed to record a GPU reset\n");
> + ret = -EINVAL;
> + goto err_spin;
> + }
> +
> +err_spin:
> + igt_spinner_end(&spin);
> + igt_spinner_fini(&spin);
> + intel_engine_set_heartbeat(engine, old_beat);
> +
> + if (ret == 0) {
> + rq = nop_request(engine);
> + if (IS_ERR(rq)) {
> + ret = PTR_ERR(rq);
> + goto err;
> + }
> +
> + ret = intel_selftest_wait_for_rq(rq);
> + i915_request_put(rq);
> + if (ret) {
> + pr_err("No-op failed to complete: %d\n", ret);
> + goto err;
> + }
> + }
> +
> +err:
> + intel_runtime_pm_put(gt->uncore->rpm, wakeref);
> + kernel_context_close(ctx);
> +
> + return ret;
> +}
> +
> +int intel_guc_hang_check(struct drm_i915_private *i915)
> +{
> + static const struct i915_subtest tests[] = {
> + SUBTEST(intel_hang_guc),
> + };
> + struct intel_gt *gt = to_gt(i915);
> +
> + if (intel_gt_is_wedged(gt))
> + return 0;
> +
> + if (!intel_uc_uses_guc_submission(&gt->uc))
> + return 0;
> +
> + return intel_gt_live_subtests(tests, gt);
> +}
> diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
> index bdd290f2bf3cd..aaf8a380e5c78 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
> +++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
> @@ -49,5 +49,6 @@ selftest(perf, i915_perf_live_selftests)
> selftest(slpc, intel_slpc_live_selftests)
> selftest(guc, intel_guc_live_selftests)
> selftest(guc_multi_lrc, intel_guc_multi_lrc_live_selftests)
> +selftest(guc_hang, intel_guc_hang_check)
> /* Here be dragons: keep last to run last! */
> selftest(late_gt_pm, intel_gt_pm_late_selftests)