[Intel-gfx] [PATCH 7/7] drm/i915: Only recover active engines
Chris Wilson
chris at chris-wilson.co.uk
Sat Jun 22 10:42:16 UTC 2019
If we issue a reset to a currently idle engine, leave it idle
afterwards. This is useful to excise a linkage between reset and the
shrinker. When waking the engine, we need to pin the default context
image which we use for overwriting a guilty context -- if the engine is
idle we do not need this pinned image! However, this pinning means that
waking the engine acquires the FS_RECLAIM, and so may trigger the
shrinker. The shrinker itself may need to wait upon the GPU to unbind
an object and so may require the services of reset; ergo we should avoid
the engine wake-up path.
The danger in skipping the recovery for idle engines is that we leave the
engine with no context defined, which may interfere with the operation of
the power context on some older platforms. In practice, we should only
be resetting an active GPU, but it is something to look out for on Ironlake
(if memory serves).
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
drivers/gpu/drm/i915/gt/intel_reset.c | 48 ++++++++++++++++--------
drivers/gpu/drm/i915/gt/selftest_reset.c | 8 +++-
2 files changed, 39 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 25ce2f26b96e..45f30153a6e1 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -669,8 +669,10 @@ int intel_reset_guc(struct drm_i915_private *i915)
* Ensure irq handler finishes, and not run again.
* Also return the active request so that we only search for it once.
*/
-static void reset_prepare_engine(struct intel_engine_cs *engine)
+static bool reset_prepare_engine(struct intel_engine_cs *engine)
{
+ bool awake;
+
/*
* During the reset sequence, we must prevent the engine from
* entering RC6. As the context state is undefined until we restart
@@ -678,9 +680,12 @@ static void reset_prepare_engine(struct intel_engine_cs *engine)
* written to the powercontext is undefined and so we may lose
* GPU state upon resume, i.e. fail to restart after a reset.
*/
- intel_engine_pm_get(engine);
intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
+ awake = intel_engine_pm_get_if_awake(engine);
+
engine->reset.prepare(engine);
+
+ return awake;
}
static void revoke_mmaps(struct drm_i915_private *i915)
@@ -709,16 +714,20 @@ static void revoke_mmaps(struct drm_i915_private *i915)
}
}
-static void reset_prepare(struct drm_i915_private *i915)
+static intel_engine_mask_t reset_prepare(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
+ intel_engine_mask_t awake = 0;
enum intel_engine_id id;
intel_gt_pm_get(i915);
for_each_engine(engine, i915, id)
- reset_prepare_engine(engine);
+ if (reset_prepare_engine(engine))
+ awake |= engine->mask;
intel_uc_reset_prepare(i915);
+
+ return awake;
}
static void gt_revoke(struct drm_i915_private *i915)
@@ -749,20 +758,23 @@ static int gt_reset(struct drm_i915_private *i915,
return err;
}
-static void reset_finish_engine(struct intel_engine_cs *engine)
+static void reset_finish_engine(struct intel_engine_cs *engine, bool awake)
{
engine->reset.finish(engine);
- intel_engine_pm_put(engine);
+ if (awake)
+ intel_engine_pm_put(engine);
+
intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
}
-static void reset_finish(struct drm_i915_private *i915)
+static void reset_finish(struct drm_i915_private *i915,
+ intel_engine_mask_t awake)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, i915, id) {
- reset_finish_engine(engine);
+ reset_finish_engine(engine, awake & engine->mask);
intel_engine_signal_breadcrumbs(engine);
}
intel_gt_pm_put(i915);
@@ -789,6 +801,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
{
struct i915_gpu_error *error = &i915->gpu_error;
struct intel_engine_cs *engine;
+ intel_engine_mask_t awake;
enum intel_engine_id id;
if (test_bit(I915_WEDGED, &error->flags))
@@ -808,7 +821,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
* rolling the global seqno forward (since this would complete requests
* for which we haven't set the fence error to EIO yet).
*/
- reset_prepare(i915);
+ awake = reset_prepare(i915);
/* Even if the GPU reset fails, it should still stop the engines */
if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
@@ -832,7 +845,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
for_each_engine(engine, i915, id)
engine->cancel_requests(engine);
- reset_finish(i915);
+ reset_finish(i915, awake);
GEM_TRACE("end\n");
}
@@ -964,6 +977,7 @@ void i915_reset(struct drm_i915_private *i915,
const char *reason)
{
struct i915_gpu_error *error = &i915->gpu_error;
+ intel_engine_mask_t awake;
int ret;
GEM_TRACE("flags=%lx\n", error->flags);
@@ -980,7 +994,7 @@ void i915_reset(struct drm_i915_private *i915,
dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason);
error->reset_count++;
- reset_prepare(i915);
+ awake = reset_prepare(i915);
if (!intel_has_gpu_reset(i915)) {
if (i915_modparams.reset)
@@ -1021,7 +1035,7 @@ void i915_reset(struct drm_i915_private *i915,
i915_queue_hangcheck(i915);
finish:
- reset_finish(i915);
+ reset_finish(i915, awake);
unlock:
mutex_unlock(&error->wedge_mutex);
return;
@@ -1067,7 +1081,8 @@ static inline int intel_gt_reset_engine(struct drm_i915_private *i915,
int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
{
struct i915_gpu_error *error = &engine->i915->gpu_error;
- int ret;
+ int ret = 0;
+ bool awake;
GEM_TRACE("%s flags=%lx\n", engine->name, error->flags);
GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
@@ -1075,7 +1090,9 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
if (!intel_engine_is_awake(engine))
return 0;
- reset_prepare_engine(engine);
+ awake = reset_prepare_engine(engine);
+ if (!awake)
+ goto finish;
if (msg)
dev_notice(engine->i915->drm.dev,
@@ -1112,7 +1129,8 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
out:
intel_engine_cancel_stop_cs(engine);
- reset_finish_engine(engine);
+finish:
+ reset_finish_engine(engine, awake);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index 89da9e7cc1ba..f173a264bb59 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -71,12 +71,16 @@ static int igt_atomic_reset(void *arg)
goto unlock;
for (p = igt_atomic_phases; p->name; p++) {
+ intel_engine_mask_t awake;
+
GEM_TRACE("intel_gpu_reset under %s\n", p->name);
p->critical_section_begin();
- reset_prepare(i915);
+ awake = reset_prepare(i915);
+
err = intel_gpu_reset(i915, ALL_ENGINES);
- reset_finish(i915);
+
+ reset_finish(i915, awake);
p->critical_section_end();
if (err) {
--
2.20.1
More information about the Intel-gfx
mailing list