[Intel-gfx] [PATCH 06/43] drm/i915: Reduce presumption of request ordering for barriers
Tvrtko Ursulin
tvrtko.ursulin at linux.intel.com
Thu Mar 7 17:26:49 UTC 2019
On 06/03/2019 14:24, Chris Wilson wrote:
> Currently we assume that we know the order in which requests run and so
> can determine if we need to reissue a switch-to-kernel-context prior to
> idling. That assumption does not hold for the future, so instead of
> tracking which barriers have been used, simply determine if we have ever
> switched away from the kernel context by using the engine and before
> idling ensure that all engines that have been used since the last idle
> are synchronously switched back to the kernel context for safety (and
> ease of shrinking memory while idle).
>
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> ---
> drivers/gpu/drm/i915/i915_drv.h | 1 +
> drivers/gpu/drm/i915/i915_gem.c | 13 ++--
> drivers/gpu/drm/i915/i915_gem_context.c | 66 +------------------
> drivers/gpu/drm/i915/i915_gem_context.h | 3 +-
> drivers/gpu/drm/i915/i915_gem_evict.c | 2 +-
> drivers/gpu/drm/i915/i915_request.c | 1 +
> drivers/gpu/drm/i915/intel_engine_cs.c | 5 ++
> .../gpu/drm/i915/selftests/i915_gem_context.c | 3 +-
> .../gpu/drm/i915/selftests/igt_flush_test.c | 2 +-
> .../gpu/drm/i915/selftests/mock_gem_device.c | 4 ++
> 10 files changed, 28 insertions(+), 72 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index de142a268371..59d041eb72d7 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -1994,6 +1994,7 @@ struct drm_i915_private {
>
> struct list_head active_rings;
> struct list_head closed_vma;
> + unsigned long active_engines;
Can use intel_engine_mask_t.
> u32 active_requests;
>
> /**
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index d4800e7d6f2c..ed7695fd444a 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -2845,9 +2845,10 @@ static void assert_kernel_context_is_current(struct drm_i915_private *i915)
> }
> }
>
> -static bool switch_to_kernel_context_sync(struct drm_i915_private *i915)
> +static bool switch_to_kernel_context_sync(struct drm_i915_private *i915,
> + unsigned long mask)
> {
> - if (i915_gem_switch_to_kernel_context(i915))
> + if (i915_gem_switch_to_kernel_context(i915, mask))
> return false;
>
> if (i915_gem_wait_for_idle(i915,
> @@ -2862,7 +2863,8 @@ static bool switch_to_kernel_context_sync(struct drm_i915_private *i915)
>
> static bool load_power_context(struct drm_i915_private *i915)
> {
> - if (!switch_to_kernel_context_sync(i915))
> + /* Force loading the kernel context on all engines */
> + if (!switch_to_kernel_context_sync(i915, -1))
ALL_ENGINES for self-documenting readability?
> return false;
>
> /*
> @@ -2910,7 +2912,8 @@ i915_gem_idle_work_handler(struct work_struct *work)
> if (!gt->active_requests && !work_pending(&gt->idle_work.work)) {
> ++gt->active_requests; /* don't requeue idle */
>
> - if (!switch_to_kernel_context_sync(i915)) {
> + if (!switch_to_kernel_context_sync(i915,
> + i915->gt.active_engines)) {
> dev_err(i915->drm.dev,
> "Failed to idle engines, declaring wedged!\n");
> GEM_TRACE_DUMP();
> @@ -4369,7 +4372,7 @@ void i915_gem_suspend(struct drm_i915_private *i915)
> * state. Fortunately, the kernel_context is disposable and we do
> * not rely on its state.
> */
> - if (!switch_to_kernel_context_sync(i915)) {
> + if (!switch_to_kernel_context_sync(i915, i915->gt.active_engines)) {
> /* Forcibly cancel outstanding work and leave the gpu quiet. */
> i915_gem_set_wedged(i915);
> }
> diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
> index 9a3eb4f66d85..486203e9d205 100644
> --- a/drivers/gpu/drm/i915/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/i915_gem_context.c
> @@ -704,63 +704,10 @@ last_request_on_engine(struct i915_timeline *timeline,
> return NULL;
> }
>
> -static bool engine_has_kernel_context_barrier(struct intel_engine_cs *engine)
> -{
> - struct drm_i915_private *i915 = engine->i915;
> - const struct intel_context * const ce =
> - to_intel_context(i915->kernel_context, engine);
> - struct i915_timeline *barrier = ce->ring->timeline;
> - struct intel_ring *ring;
> - bool any_active = false;
> -
> - lockdep_assert_held(&i915->drm.struct_mutex);
> - list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
> - struct i915_request *rq;
> -
> - rq = last_request_on_engine(ring->timeline, engine);
> - if (!rq)
> - continue;
> -
> - any_active = true;
> -
> - if (rq->hw_context == ce)
> - continue;
> -
> - /*
> - * Was this request submitted after the previous
> - * switch-to-kernel-context?
> - */
> - if (!i915_timeline_sync_is_later(barrier, &rq->fence)) {
> - GEM_TRACE("%s needs barrier for %llx:%lld\n",
> - ring->timeline->name,
> - rq->fence.context,
> - rq->fence.seqno);
> - return false;
> - }
> -
> - GEM_TRACE("%s has barrier after %llx:%lld\n",
> - ring->timeline->name,
> - rq->fence.context,
> - rq->fence.seqno);
> - }
> -
> - /*
> - * If any other timeline was still active and behind the last barrier,
> - * then our last switch-to-kernel-context must still be queued and
> - * will run last (leaving the engine in the kernel context when it
> - * eventually idles).
> - */
> - if (any_active)
> - return true;
> -
> - /* The engine is idle; check that it is idling in the kernel context. */
> - return engine->last_retired_context == ce;
> -}
> -
> -int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
> +int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
> + unsigned long mask)
> {
> struct intel_engine_cs *engine;
> - enum intel_engine_id id;
>
> GEM_TRACE("awake?=%s\n", yesno(i915->gt.awake));
>
> @@ -771,17 +718,11 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
> if (i915_terminally_wedged(i915))
> return 0;
>
> - i915_retire_requests(i915);
> -
> - for_each_engine(engine, i915, id) {
> + for_each_engine_masked(engine, i915, mask, mask) {
> struct intel_ring *ring;
> struct i915_request *rq;
>
> GEM_BUG_ON(!to_intel_context(i915->kernel_context, engine));
> - if (engine_has_kernel_context_barrier(engine))
> - continue;
> -
> - GEM_TRACE("emit barrier on %s\n", engine->name);
>
> rq = i915_request_alloc(engine, i915->kernel_context);
> if (IS_ERR(rq))
> @@ -805,7 +746,6 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
> i915_sw_fence_await_sw_fence_gfp(&rq->submit,
> &prev->submit,
> I915_FENCE_GFP);
> - i915_timeline_sync_set(rq->timeline, &prev->fence);
> }
>
> i915_request_add(rq);
> diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
> index 2f9ef333acaa..e1188d77a23d 100644
> --- a/drivers/gpu/drm/i915/i915_gem_context.h
> +++ b/drivers/gpu/drm/i915/i915_gem_context.h
> @@ -372,7 +372,8 @@ int i915_gem_context_open(struct drm_i915_private *i915,
> void i915_gem_context_close(struct drm_file *file);
>
> int i915_switch_context(struct i915_request *rq);
> -int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
> +int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
> + unsigned long engine_mask);
>
> void i915_gem_context_release(struct kref *ctx_ref);
> struct i915_gem_context *
> diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
> index 68d74c50ac39..7d8e90dfca84 100644
> --- a/drivers/gpu/drm/i915/i915_gem_evict.c
> +++ b/drivers/gpu/drm/i915/i915_gem_evict.c
> @@ -62,7 +62,7 @@ static int ggtt_flush(struct drm_i915_private *i915)
> * the hopes that we can then remove contexts and the like only
> * bound by their active reference.
> */
> - err = i915_gem_switch_to_kernel_context(i915);
> + err = i915_gem_switch_to_kernel_context(i915, i915->gt.active_engines);
> if (err)
> return err;
>
> diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> index f8a63495114c..9533a85cb0b3 100644
> --- a/drivers/gpu/drm/i915/i915_request.c
> +++ b/drivers/gpu/drm/i915/i915_request.c
> @@ -1068,6 +1068,7 @@ void i915_request_add(struct i915_request *request)
> GEM_TRACE("marking %s as active\n", ring->timeline->name);
> list_add(&ring->active_link, &request->i915->gt.active_rings);
> }
> + request->i915->gt.active_engines |= request->engine->mask;
> request->emitted_jiffies = jiffies;
>
> /*
> diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
> index 555a4590fa23..18174f808fd8 100644
> --- a/drivers/gpu/drm/i915/intel_engine_cs.c
> +++ b/drivers/gpu/drm/i915/intel_engine_cs.c
> @@ -1106,6 +1106,9 @@ bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
>
> lockdep_assert_held(&engine->i915->drm.struct_mutex);
>
> + if (!engine->context_size)
> + return true;
> +
> /*
> * Check the last context seen by the engine. If active, it will be
> * the last request that remains in the timeline. When idle, it is
> @@ -1205,6 +1208,8 @@ void intel_engines_park(struct drm_i915_private *i915)
> i915_gem_batch_pool_fini(&engine->batch_pool);
> engine->execlists.no_priolist = false;
> }
> +
> + i915->gt.active_engines = 0;
> }
>
> /**
> diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
> index cb3d77c95ddf..00ac34007582 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
> @@ -1514,7 +1514,8 @@ static int __igt_switch_to_kernel_context(struct drm_i915_private *i915,
> }
> }
>
> - err = i915_gem_switch_to_kernel_context(i915);
> + err = i915_gem_switch_to_kernel_context(i915,
> + i915->gt.active_engines);
> if (err)
> return err;
>
> diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
> index e0d3122fd35a..94aee4071a66 100644
> --- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
> +++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
> @@ -14,7 +14,7 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
> cond_resched();
>
> if (flags & I915_WAIT_LOCKED &&
> - i915_gem_switch_to_kernel_context(i915)) {
> + i915_gem_switch_to_kernel_context(i915, i915->gt.active_engines)) {
> pr_err("Failed to switch back to kernel context; declaring wedged\n");
> i915_gem_set_wedged(i915);
> }
> diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
> index b2c7808e0595..54cfb611c0aa 100644
> --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
> +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
> @@ -109,6 +109,10 @@ static void mock_retire_work_handler(struct work_struct *work)
>
> static void mock_idle_work_handler(struct work_struct *work)
> {
> + struct drm_i915_private *i915 =
> + container_of(work, typeof(*i915), gt.idle_work.work);
> +
> + i915->gt.active_engines = 0;
> }
>
> static int pm_domain_resume(struct device *dev)
>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
Regards,
Tvrtko
More information about the Intel-gfx
mailing list