[Intel-gfx] [PATCH 2/2] drm/i915: Use same test for eviction and submitting kernel context
Chris Wilson
chris@chris-wilson.co.uk
Tue Oct 24 22:08:55 UTC 2017
During evict, we wish to idle the GPU if we see that the GGTT is full.
However, our tests for idle in i915_gem_evict_something() and in
i915_gem_switch_to_kernel_context() do not match, leading to
disappointment: we never believe that we are idle and keep trying to
flush the GGTT ad infinitum.
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=103438
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
---
drivers/gpu/drm/i915/i915_gem_context.c | 7 +++----
drivers/gpu/drm/i915/i915_gem_evict.c | 3 ++-
drivers/gpu/drm/i915/intel_engine_cs.c | 6 ++++++
drivers/gpu/drm/i915/intel_ringbuffer.h | 2 ++
4 files changed, 13 insertions(+), 5 deletions(-)
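
Aside (not part of the commit): the idea can be summarised with a small
standalone sketch. The types, engine array and the is_kernel flag below are
stand-ins for illustration only, not the real driver structures; the point is
that both the eviction path and the switch-to-kernel-context path now consult
the same predicate, so the two notions of "idle" can no longer diverge.

/*
 * Minimal C99 sketch (stubbed types, no kernel headers) of the shared test.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct i915_gem_context {
	bool is_kernel; /* stand-in for i915_gem_context_is_kernel() */
};

struct intel_engine_cs {
	const struct i915_gem_context *last_retired_context;
};

/* The shared predicate added by this patch (simplified). */
static bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
{
	return !engine->last_retired_context ||
	       engine->last_retired_context->is_kernel;
}

/* Eviction path: the GGTT is only "idle" if every engine passes the test. */
static bool ggtt_is_idle(const struct intel_engine_cs *engines, size_t count)
{
	for (size_t i = 0; i < count; i++)
		if (!intel_engine_has_kernel_context(&engines[i]))
			return false;
	return true;
}

/* Submission path: skip emitting a request if the engine already passes it. */
static bool engine_has_idle_kernel_context(const struct intel_engine_cs *engine)
{
	/* (The real code also checks for outstanding requests first.) */
	return intel_engine_has_kernel_context(engine);
}

int main(void)
{
	struct i915_gem_context kctx = { .is_kernel = true };
	struct intel_engine_cs engines[2] = {
		{ .last_retired_context = &kctx },
		{ .last_retired_context = NULL },
	};

	printf("ggtt_is_idle: %d\n", ggtt_is_idle(engines, 2));
	printf("engine 0 has kernel context: %d\n",
	       engine_has_idle_kernel_context(&engines[0]));
	return 0;
}

In the actual patch the same role is played by intel_engine_has_kernel_context()
in intel_engine_cs.c, used by both ggtt_is_idle() and
engine_has_idle_kernel_context(), as the diff below shows.
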
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 5bf96a258509..4f26f80b1b3e 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -897,7 +897,7 @@ int i915_switch_context(struct drm_i915_gem_request *req)
return do_rcs_switch(req);
}
-static bool engine_has_kernel_context(struct intel_engine_cs *engine)
+static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine)
{
struct i915_gem_timeline *timeline;
@@ -913,8 +913,7 @@ static bool engine_has_kernel_context(struct intel_engine_cs *engine)
return false;
}
- return (!engine->last_retired_context ||
- i915_gem_context_is_kernel(engine->last_retired_context));
+ return intel_engine_has_kernel_context(engine);
}
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
@@ -931,7 +930,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
struct drm_i915_gem_request *req;
int ret;
- if (engine_has_kernel_context(engine))
+ if (engine_has_idle_kernel_context(engine))
continue;
req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index a6b769994d8d..60ca4f05ae94 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -46,7 +46,7 @@ static bool ggtt_is_idle(struct drm_i915_private *i915)
return false;
for_each_engine(engine, i915, id) {
- if (engine->last_retired_context != i915->kernel_context)
+ if (!intel_engine_has_kernel_context(engine))
return false;
}
@@ -73,6 +73,7 @@ static int ggtt_flush(struct drm_i915_private *i915)
if (err)
return err;
+ GEM_BUG_ON(!ggtt_is_idle(i915));
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index ab5bf4e2e28e..f6cdc50d4237 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1585,6 +1585,12 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
return true;
}
+bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
+{
+ return (!engine->last_retired_context ||
+ i915_gem_context_is_kernel(engine->last_retired_context));
+}
+
void intel_engines_reset_default_submission(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 6a42ed618a28..a2589aa89163 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -866,6 +866,8 @@ static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
+bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);
+
void intel_engines_mark_idle(struct drm_i915_private *i915);
void intel_engines_reset_default_submission(struct drm_i915_private *i915);
--
2.15.0.rc2