[Intel-gfx] [PATCH 11/27] drm/i915: Merge wait_for_timelines with retire_request
Tvrtko Ursulin
tvrtko.ursulin at linux.intel.com
Wed Sep 25 10:47:35 UTC 2019
On 25/09/2019 11:01, Chris Wilson wrote:
> wait_for_timelines is essentially the same loop as retiring requests
> (with an extra timeout), so merge the two into one routine.
>
> v2: i915_retire_requests_timeout and keep VT'd w/a as !interruptible
>
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> ---
> drivers/gpu/drm/i915/gem/i915_gem_mman.c | 4 +-
> drivers/gpu/drm/i915/gem/i915_gem_pm.c | 4 +-
> .../drm/i915/gem/selftests/i915_gem_context.c | 2 +-
> drivers/gpu/drm/i915/i915_debugfs.c | 4 +-
> drivers/gpu/drm/i915/i915_drv.h | 3 +-
> drivers/gpu/drm/i915/i915_gem.c | 67 ++-----------------
> drivers/gpu/drm/i915/i915_gem_evict.c | 12 ++--
> drivers/gpu/drm/i915/i915_gem_gtt.c | 4 +-
> drivers/gpu/drm/i915/i915_request.c | 26 ++++++-
> drivers/gpu/drm/i915/i915_request.h | 7 +-
> .../gpu/drm/i915/selftests/igt_flush_test.c | 4 +-
> .../gpu/drm/i915/selftests/igt_live_test.c | 4 +-
> .../gpu/drm/i915/selftests/mock_gem_device.c | 2 +-
> 13 files changed, 49 insertions(+), 94 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> index c44614895487..9cb59fe1a736 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> @@ -431,9 +431,7 @@ static int create_mmap_offset(struct drm_i915_gem_object *obj)
>
> /* Attempt to reap some mmap space from dead objects */
> do {
> - err = i915_gem_wait_for_idle(i915,
> - I915_WAIT_INTERRUPTIBLE,
> - MAX_SCHEDULE_TIMEOUT);
> + err = i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT);
> if (err)
> break;
>
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
> index c9c9e49391fa..04a9a27d659d 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
> @@ -59,9 +59,7 @@ static bool switch_to_kernel_context_sync(struct intel_gt *gt)
> {
> bool result = !intel_gt_is_wedged(gt);
>
> - if (i915_gem_wait_for_idle(gt->i915,
> - I915_WAIT_FOR_IDLE_BOOST,
> - I915_GEM_IDLE_TIMEOUT) == -ETIME) {
> + if (i915_gem_wait_for_idle(gt->i915, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
> /* XXX hide warning from gem_eio */
> if (i915_modparams.reset) {
> dev_err(gt->i915->drm.dev,
> diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
> index 82f54e819eb0..b26f550f8430 100644
> --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
> @@ -923,7 +923,7 @@ __sseu_finish(const char *name,
>
> if ((flags & TEST_IDLE) && ret == 0) {
> ret = i915_gem_wait_for_idle(ce->engine->i915,
> - 0, MAX_SCHEDULE_TIMEOUT);
> + MAX_SCHEDULE_TIMEOUT);
> if (ret)
> return ret;
>
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index 7bbc9eda8ebe..033764326601 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -3632,9 +3632,7 @@ i915_drop_caches_set(void *data, u64 val)
> i915_retire_requests(i915);
>
> if (val & (DROP_IDLE | DROP_ACTIVE)) {
> - ret = i915_gem_wait_for_idle(i915,
> - I915_WAIT_INTERRUPTIBLE,
> - MAX_SCHEDULE_TIMEOUT);
> + ret = i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT);
> if (ret)
> return ret;
> }
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 3611fd5446f3..6e5d5e4ca91d 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -2328,8 +2328,7 @@ void i915_gem_driver_register(struct drm_i915_private *i915);
> void i915_gem_driver_unregister(struct drm_i915_private *i915);
> void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
> void i915_gem_driver_release(struct drm_i915_private *dev_priv);
> -int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
> - unsigned int flags, long timeout);
> +int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, long timeout);
> void i915_gem_suspend(struct drm_i915_private *dev_priv);
> void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
> void i915_gem_resume(struct drm_i915_private *dev_priv);
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 47c53bb9eb89..ab23944571fa 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -883,61 +883,7 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
> }
> }
>
> -static long
> -wait_for_timelines(struct intel_gt *gt, unsigned int wait, long timeout)
> -{
> - struct intel_gt_timelines *timelines = &gt->timelines;
> - struct intel_timeline *tl;
> - unsigned long flags;
> -
> - spin_lock_irqsave(&timelines->lock, flags);
> - list_for_each_entry(tl, &timelines->active_list, link) {
> - struct dma_fence *fence;
> -
> - fence = i915_active_fence_get(&tl->last_request);
> - if (!fence)
> - continue;
> -
> - spin_unlock_irqrestore(&timelines->lock, flags);
> -
> - if (!dma_fence_is_i915(fence)) {
> - timeout = dma_fence_wait_timeout(fence,
> - flags & I915_WAIT_INTERRUPTIBLE,
> - timeout);
> - } else {
> - struct i915_request *rq = to_request(fence);
> -
> - /*
> - * "Race-to-idle".
> - *
> - * Switching to the kernel context is often used as
> - * a synchronous step prior to idling, e.g. in suspend
> - * for flushing all current operations to memory before
> - * sleeping. These we want to complete as quickly as
> - * possible to avoid prolonged stalls, so allow the gpu
> - * to boost to maximum clocks.
> - */
> - if (flags & I915_WAIT_FOR_IDLE_BOOST)
> - gen6_rps_boost(rq);
> -
> - timeout = i915_request_wait(rq, flags, timeout);
> - }
> -
> - dma_fence_put(fence);
> - if (timeout < 0)
> - return timeout;
> -
> - /* restart after reacquiring the lock */
> - spin_lock_irqsave(&timelines->lock, flags);
> - tl = list_entry(&timelines->active_list, typeof(*tl), link);
> - }
> - spin_unlock_irqrestore(&timelines->lock, flags);
> -
> - return timeout;
> -}
> -
> -int i915_gem_wait_for_idle(struct drm_i915_private *i915,
> - unsigned int flags, long timeout)
> +int i915_gem_wait_for_idle(struct drm_i915_private *i915, long timeout)
> {
> struct intel_gt *gt = &i915->gt;
>
> @@ -945,18 +891,13 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915,
> if (!intel_gt_pm_is_awake(gt))
> return 0;
>
> - do {
> - timeout = wait_for_timelines(gt, flags, timeout);
> - if (timeout < 0)
> - return timeout;
> -
> + while ((timeout = i915_retire_requests_timeout(i915, timeout)) > 0) {
> cond_resched();
> if (signal_pending(current))
> return -EINTR;
> + }
>
> - } while (i915_retire_requests(i915));
> -
> - return 0;
> + return timeout;
> }
>
> struct i915_vma *
> diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
> index 0552bf93eea3..0a412f6d01d7 100644
> --- a/drivers/gpu/drm/i915/i915_gem_evict.c
> +++ b/drivers/gpu/drm/i915/i915_gem_evict.c
> @@ -46,9 +46,7 @@ static int ggtt_flush(struct drm_i915_private *i915)
> * the hopes that we can then remove contexts and the like only
> * bound by their active reference.
> */
> - return i915_gem_wait_for_idle(i915,
> - I915_WAIT_INTERRUPTIBLE,
> - MAX_SCHEDULE_TIMEOUT);
> + return i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT);
> }
>
> static bool
> @@ -126,6 +124,8 @@ i915_gem_evict_something(struct i915_address_space *vm,
> min_size, alignment, color,
> start, end, mode);
>
> + i915_retire_requests(vm->i915);
> +
> search_again:
> active = NULL;
> INIT_LIST_HEAD(&eviction_list);
> @@ -264,13 +264,13 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
>
> trace_i915_gem_evict_node(vm, target, flags);
>
> - /* Retire before we search the active list. Although we have
> + /*
> + * Retire before we search the active list. Although we have
> * reasonable accuracy in our retirement lists, we may have
> * a stray pin (preventing eviction) that can only be resolved by
> * retiring.
> */
> - if (!(flags & PIN_NONBLOCK))
> - i915_retire_requests(vm->i915);
> + i915_retire_requests(vm->i915);
>
> if (i915_vm_has_cache_coloring(vm)) {
> /* Expand search to cover neighbouring guard pages (or lack!) */
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index bf57c41fa1fb..30ee016ac2ba 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -2528,7 +2528,9 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
> struct i915_ggtt *ggtt = &dev_priv->ggtt;
>
> if (unlikely(ggtt->do_idle_maps)) {
> - if (i915_gem_wait_for_idle(dev_priv, 0, MAX_SCHEDULE_TIMEOUT)) {
> + /* XXX This does not prevent more requests being submitted! */
> + if (i915_retire_requests_timeout(dev_priv,
> + -MAX_SCHEDULE_TIMEOUT)) {
> DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
> /* Wait a bit, in hopes it avoids the hang */
> udelay(10);
> diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> index 4ffe62a42186..52f7c4e5b644 100644
> --- a/drivers/gpu/drm/i915/i915_request.c
> +++ b/drivers/gpu/drm/i915/i915_request.c
> @@ -1508,13 +1508,19 @@ long i915_request_wait(struct i915_request *rq,
> return timeout;
> }
>
> -bool i915_retire_requests(struct drm_i915_private *i915)
> +long i915_retire_requests_timeout(struct drm_i915_private *i915, long timeout)
> {
> struct intel_gt_timelines *timelines = &i915->gt.timelines;
> struct intel_timeline *tl, *tn;
> + unsigned long active_count = 0;
> unsigned long flags;
> + bool interruptible;
> LIST_HEAD(free);
>
> + interruptible = true;
> + if (timeout < 0)
> + timeout = -timeout, interruptible = false;
Slightly uneasy about the negative-timeout trick, but the alternatives that came to
mind (OR-ing in a flag bit, or a wrapper for passing in the (non-)interruptible
timeout) do not sound so hot either.
> +
> spin_lock_irqsave(&timelines->lock, flags);
> list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
> if (!mutex_trylock(&tl->mutex))
> @@ -1525,13 +1531,27 @@ bool i915_retire_requests(struct drm_i915_private *i915)
> tl->active_count++; /* pin the list element */
> spin_unlock_irqrestore(&timelines->lock, flags);
>
> + if (timeout > 0) {
> + struct dma_fence *fence;
> +
> + fence = i915_active_fence_get(&tl->last_request);
> + if (fence) {
> + timeout = dma_fence_wait_timeout(fence,
> + interruptible,
> + timeout);
> + dma_fence_put(fence);
> + }
> + }
> +
> retire_requests(tl);
>
> spin_lock_irqsave(&timelines->lock, flags);
>
> /* Resume iteration after dropping lock */
> list_safe_reset_next(tl, tn, link);
> - if (!--tl->active_count)
> + if (--tl->active_count)
> + active_count += !!rcu_access_pointer(tl->last_request.fence);
> + else
> list_del(&tl->link);
>
> mutex_unlock(&tl->mutex);
> @@ -1547,7 +1567,7 @@ bool i915_retire_requests(struct drm_i915_private *i915)
> list_for_each_entry_safe(tl, tn, &free, link)
> __intel_timeline_free(&tl->kref);
>
> - return !list_empty(&timelines->active_list);
> + return active_count ? timeout : 0;
> }
>
> #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
> diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
> index 621fb33cda30..256b0715180f 100644
> --- a/drivers/gpu/drm/i915/i915_request.h
> +++ b/drivers/gpu/drm/i915/i915_request.h
> @@ -310,7 +310,6 @@ long i915_request_wait(struct i915_request *rq,
> #define I915_WAIT_INTERRUPTIBLE BIT(0)
> #define I915_WAIT_PRIORITY BIT(1) /* small priority bump for the request */
> #define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */
> -#define I915_WAIT_FOR_IDLE_BOOST BIT(3)
>
> static inline bool i915_request_signaled(const struct i915_request *rq)
> {
> @@ -460,6 +459,10 @@ i915_request_active_timeline(struct i915_request *rq)
> lockdep_is_held(&rq->engine->active.lock));
> }
>
> -bool i915_retire_requests(struct drm_i915_private *i915);
> +long i915_retire_requests_timeout(struct drm_i915_private *i915, long timeout);
> +static inline void i915_retire_requests(struct drm_i915_private *i915)
> +{
> + i915_retire_requests_timeout(i915, 0);
> +}
>
> #endif /* I915_REQUEST_H */
> diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
> index 2a5fbe46ea9f..ed496bd6d84f 100644
> --- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
> +++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
> @@ -18,8 +18,7 @@ int igt_flush_test(struct drm_i915_private *i915)
>
> cond_resched();
>
> - i915_retire_requests(i915);
> - if (i915_gem_wait_for_idle(i915, 0, HZ / 5) == -ETIME) {
> + if (i915_gem_wait_for_idle(i915, HZ / 5) == -ETIME) {
> pr_err("%pS timed out, cancelling all further testing.\n",
> __builtin_return_address(0));
>
> @@ -30,7 +29,6 @@ int igt_flush_test(struct drm_i915_private *i915)
> intel_gt_set_wedged(&i915->gt);
> ret = -EIO;
> }
> - i915_retire_requests(i915);
>
> return ret;
> }
> diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.c b/drivers/gpu/drm/i915/selftests/igt_live_test.c
> index 04a6f88fdf64..eae90f97df6c 100644
> --- a/drivers/gpu/drm/i915/selftests/igt_live_test.c
> +++ b/drivers/gpu/drm/i915/selftests/igt_live_test.c
> @@ -23,9 +23,7 @@ int igt_live_test_begin(struct igt_live_test *t,
> t->func = func;
> t->name = name;
>
> - err = i915_gem_wait_for_idle(i915,
> - I915_WAIT_INTERRUPTIBLE,
> - MAX_SCHEDULE_TIMEOUT);
> + err = i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT);
> if (err) {
> pr_err("%s(%s): failed to idle before, with err=%d!",
> func, name, err);
> diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
> index 91f6253939ad..87d34167bf89 100644
> --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
> +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
> @@ -44,7 +44,7 @@ void mock_device_flush(struct drm_i915_private *i915)
> do {
> for_each_engine(engine, i915, id)
> mock_engine_flush(engine);
> - } while (i915_retire_requests(i915));
> + } while (i915_retire_requests_timeout(i915, MAX_SCHEDULE_TIMEOUT));
> }
>
> static void mock_device_release(struct drm_device *dev)
>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
Regards,
Tvrtko