[Intel-gfx] [PATCH 14/30] drm/i915: Move request runtime management onto gt
Tvrtko Ursulin
tvrtko.ursulin at linux.intel.com
Wed Oct 2 15:58:13 UTC 2019
On 02/10/2019 12:19, Chris Wilson wrote:
> Requests are run from the gt and are tied into the gt runtime power
> management, so pull the runtime request management under gt/
>
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> ---
> drivers/gpu/drm/i915/Makefile | 1 +
> drivers/gpu/drm/i915/gem/i915_gem_mman.c | 4 +-
> drivers/gpu/drm/i915/gem/i915_gem_pm.c | 28 +---
> .../drm/i915/gem/selftests/i915_gem_context.c | 5 +-
> .../drm/i915/gem/selftests/i915_gem_mman.c | 2 +-
> drivers/gpu/drm/i915/gt/intel_gt.c | 2 +
> drivers/gpu/drm/i915/gt/intel_gt_pm.c | 5 +-
> drivers/gpu/drm/i915/gt/intel_gt_requests.c | 123 ++++++++++++++++++
> drivers/gpu/drm/i915/gt/intel_gt_requests.h | 24 ++++
> drivers/gpu/drm/i915/gt/intel_gt_types.h | 11 ++
> drivers/gpu/drm/i915/gt/selftest_timeline.c | 8 +-
> drivers/gpu/drm/i915/i915_debugfs.c | 17 +--
> drivers/gpu/drm/i915/i915_drv.h | 10 --
> drivers/gpu/drm/i915/i915_gem.c | 17 ---
> drivers/gpu/drm/i915/i915_gem_evict.c | 14 +-
> drivers/gpu/drm/i915/i915_gem_gtt.c | 5 +-
> drivers/gpu/drm/i915/i915_request.c | 64 +--------
> drivers/gpu/drm/i915/i915_request.h | 7 +-
> .../gpu/drm/i915/selftests/igt_flush_test.c | 9 +-
> .../gpu/drm/i915/selftests/igt_live_test.c | 5 +-
> .../gpu/drm/i915/selftests/mock_gem_device.c | 10 +-
> 21 files changed, 213 insertions(+), 158 deletions(-)
> create mode 100644 drivers/gpu/drm/i915/gt/intel_gt_requests.c
> create mode 100644 drivers/gpu/drm/i915/gt/intel_gt_requests.h
>
> diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
> index d2b53b5add81..06e1876d0250 100644
> --- a/drivers/gpu/drm/i915/Makefile
> +++ b/drivers/gpu/drm/i915/Makefile
> @@ -83,6 +83,7 @@ gt-y += \
> gt/intel_gt_irq.o \
> gt/intel_gt_pm.o \
> gt/intel_gt_pm_irq.o \
> + gt/intel_gt_requests.o \
> gt/intel_hangcheck.o \
> gt/intel_lrc.o \
> gt/intel_rc6.o \
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> index 45bbd22c14f1..fd4122d8c0a9 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> @@ -8,6 +8,7 @@
> #include <linux/sizes.h>
>
> #include "gt/intel_gt.h"
> +#include "gt/intel_gt_requests.h"
>
> #include "i915_drv.h"
> #include "i915_gem_gtt.h"
> @@ -424,6 +425,7 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
> static int create_mmap_offset(struct drm_i915_gem_object *obj)
> {
> struct drm_i915_private *i915 = to_i915(obj->base.dev);
> + struct intel_gt *gt = &i915->gt;
> int err;
>
> err = drm_gem_create_mmap_offset(&obj->base);
> @@ -431,7 +433,7 @@ static int create_mmap_offset(struct drm_i915_gem_object *obj)
> return 0;
>
> /* Attempt to reap some mmap space from dead objects */
> - err = i915_retire_requests_timeout(i915, MAX_SCHEDULE_TIMEOUT);
> + err = intel_gt_retire_requests_timeout(gt, MAX_SCHEDULE_TIMEOUT);
> if (err)
> return err;
>
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
> index 90b211257f2d..9194d8464bf7 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
> @@ -7,31 +7,18 @@
> #include "gem/i915_gem_pm.h"
> #include "gt/intel_gt.h"
> #include "gt/intel_gt_pm.h"
> +#include "gt/intel_gt_requests.h"
>
> #include "i915_drv.h"
> #include "i915_globals.h"
>
> static void i915_gem_park(struct drm_i915_private *i915)
> {
> - cancel_delayed_work(&i915->gem.retire_work);
> -
> i915_vma_parked(i915);
>
> i915_globals_park();
> }
>
> -static void retire_work_handler(struct work_struct *work)
> -{
> - struct drm_i915_private *i915 =
> - container_of(work, typeof(*i915), gem.retire_work.work);
> -
> - i915_retire_requests(i915);
> -
> - queue_delayed_work(i915->wq,
> - &i915->gem.retire_work,
> - round_jiffies_up_relative(HZ));
> -}
> -
> static int pm_notifier(struct notifier_block *nb,
> unsigned long action,
> void *data)
> @@ -42,9 +29,6 @@ static int pm_notifier(struct notifier_block *nb,
> switch (action) {
> case INTEL_GT_UNPARK:
> i915_globals_unpark();
> - queue_delayed_work(i915->wq,
> - &i915->gem.retire_work,
> - round_jiffies_up_relative(HZ));
> break;
>
> case INTEL_GT_PARK:
> @@ -59,7 +43,7 @@ static bool switch_to_kernel_context_sync(struct intel_gt *gt)
> {
> bool result = !intel_gt_is_wedged(gt);
>
> - if (i915_gem_wait_for_idle(gt->i915, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
> + if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
> /* XXX hide warning from gem_eio */
> if (i915_modparams.reset) {
> dev_err(gt->i915->drm.dev,
> @@ -122,14 +106,12 @@ void i915_gem_suspend(struct drm_i915_private *i915)
> * state. Fortunately, the kernel_context is disposable and we do
> * not rely on its state.
> */
> - switch_to_kernel_context_sync(&i915->gt);
> + intel_gt_suspend(&i915->gt);
> + intel_uc_suspend(&i915->gt.uc);
>
> cancel_delayed_work_sync(&i915->gt.hangcheck.work);
>
> i915_gem_drain_freed_objects(i915);
> -
> - intel_uc_suspend(&i915->gt.uc);
> - intel_gt_suspend(&i915->gt);
> }
>
> static struct drm_i915_gem_object *first_mm_object(struct list_head *list)
> @@ -239,8 +221,6 @@ void i915_gem_resume(struct drm_i915_private *i915)
>
> void i915_gem_init__pm(struct drm_i915_private *i915)
> {
> - INIT_DELAYED_WORK(&i915->gem.retire_work, retire_work_handler);
> -
> i915->gem.pm_notifier.notifier_call = pm_notifier;
> blocking_notifier_chain_register(&i915->gt.pm_notifications,
> &i915->gem.pm_notifier);
> diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
> index f902aeee1755..2288757808ae 100644
> --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
> @@ -8,6 +8,7 @@
>
> #include "gem/i915_gem_pm.h"
> #include "gt/intel_gt.h"
> +#include "gt/intel_gt_requests.h"
> #include "gt/intel_reset.h"
> #include "i915_selftest.h"
>
> @@ -518,7 +519,7 @@ create_test_object(struct i915_address_space *vm,
> int err;
>
> /* Keep in GEM's good graces */
> - i915_retire_requests(vm->i915);
> + intel_gt_retire_requests(vm->gt);
>
> size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
> size = round_down(size, DW_PER_PAGE * PAGE_SIZE);
> @@ -1136,7 +1137,7 @@ __sseu_finish(const char *name,
> igt_spinner_end(spin);
>
> if ((flags & TEST_IDLE) && ret == 0) {
> - ret = i915_gem_wait_for_idle(ce->engine->i915,
> + ret = intel_gt_wait_for_idle(ce->engine->gt,
> MAX_SCHEDULE_TIMEOUT);
> if (ret)
> return ret;
> diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
> index 4ba6ed5c8313..1cd25cfd0246 100644
> --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
> +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
> @@ -573,7 +573,7 @@ static void disable_retire_worker(struct drm_i915_private *i915)
> {
> i915_gem_driver_unregister__shrinker(i915);
> intel_gt_pm_get(&i915->gt);
> - cancel_delayed_work_sync(&i915->gem.retire_work);
> + cancel_delayed_work_sync(&i915->gt.requests.retire_work);
> }
>
> static void restore_retire_worker(struct drm_i915_private *i915)
> diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
> index 7205595369be..53220741e49e 100644
> --- a/drivers/gpu/drm/i915/gt/intel_gt.c
> +++ b/drivers/gpu/drm/i915/gt/intel_gt.c
> @@ -6,6 +6,7 @@
> #include "i915_drv.h"
> #include "intel_gt.h"
> #include "intel_gt_pm.h"
> +#include "intel_gt_requests.h"
> #include "intel_mocs.h"
> #include "intel_rc6.h"
> #include "intel_uncore.h"
> @@ -23,6 +24,7 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
>
> intel_gt_init_hangcheck(gt);
> intel_gt_init_reset(gt);
> + intel_gt_init_requests(gt);
> intel_gt_pm_init_early(gt);
> intel_uc_init_early(&gt->uc);
> }
> diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
> index bdb34f03ec47..d2e80ba64d69 100644
> --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
> +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
> @@ -10,6 +10,7 @@
> #include "intel_engine_pm.h"
> #include "intel_gt.h"
> #include "intel_gt_pm.h"
> +#include "intel_gt_requests.h"
> #include "intel_pm.h"
> #include "intel_rc6.h"
> #include "intel_wakeref.h"
> @@ -49,6 +50,7 @@ static int __gt_unpark(struct intel_wakeref *wf)
> i915_pmu_gt_unparked(i915);
>
> intel_gt_queue_hangcheck(gt);
> + intel_gt_unpark_requests(gt);
>
> pm_notify(gt, INTEL_GT_UNPARK);
>
> @@ -64,6 +66,7 @@ static int __gt_park(struct intel_wakeref *wf)
> GEM_TRACE("\n");
>
> pm_notify(gt, INTEL_GT_PARK);
> + intel_gt_park_requests(gt);
>
> i915_pmu_gt_parked(i915);
> if (INTEL_GEN(i915) >= 6)
> @@ -196,7 +199,7 @@ int intel_gt_resume(struct intel_gt *gt)
>
> static void wait_for_idle(struct intel_gt *gt)
> {
> - if (i915_gem_wait_for_idle(gt->i915, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
> + if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
> /*
> * Forcibly cancel outstanding work and leave
> * the gpu quiet.
> diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
> new file mode 100644
> index 000000000000..8aed89fd2cdc
> --- /dev/null
> +++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
> @@ -0,0 +1,123 @@
> +/*
> + * SPDX-License-Identifier: MIT
> + *
> + * Copyright © 2019 Intel Corporation
> + */
> +
> +#include "i915_request.h"
> +#include "intel_gt.h"
> +#include "intel_gt_pm.h"
> +#include "intel_gt_requests.h"
> +#include "intel_timeline.h"
> +
> +static void retire_requests(struct intel_timeline *tl)
> +{
> + struct i915_request *rq, *rn;
> +
> + list_for_each_entry_safe(rq, rn, &tl->requests, link)
> + if (!i915_request_retire(rq))
> + break;
> +}
> +
> +long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
> +{
> + struct intel_gt_timelines *timelines = &gt->timelines;
> + struct intel_timeline *tl, *tn;
> + unsigned long active_count = 0;
> + unsigned long flags;
> + bool interruptible;
> + LIST_HEAD(free);
> +
> + interruptible = true;
> + if (unlikely(timeout < 0))
> + timeout = -timeout, interruptible = false;
> +
> + spin_lock_irqsave(&timelines->lock, flags);
> + list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
> + if (!mutex_trylock(&tl->mutex))
> + continue;
> +
> + intel_timeline_get(tl);
> + GEM_BUG_ON(!tl->active_count);
> + tl->active_count++; /* pin the list element */
> + spin_unlock_irqrestore(&timelines->lock, flags);
> +
> + if (timeout > 0) {
> + struct dma_fence *fence;
> +
> + fence = i915_active_fence_get(&tl->last_request);
> + if (fence) {
> + timeout = dma_fence_wait_timeout(fence,
> + true,
> + timeout);
> + dma_fence_put(fence);
> + }
> + }
> +
> + retire_requests(tl);
> +
> + spin_lock_irqsave(&timelines->lock, flags);
> +
> + /* Resume iteration after dropping lock */
> + list_safe_reset_next(tl, tn, link);
> + if (--tl->active_count)
> + active_count += !!rcu_access_pointer(tl->last_request.fence);
> + else
> + list_del(&tl->link);
> +
> + mutex_unlock(&tl->mutex);
> +
> + /* Defer the final release to after the spinlock */
> + if (refcount_dec_and_test(&tl->kref.refcount)) {
> + GEM_BUG_ON(tl->active_count);
> + list_add(&tl->link, &free);
> + }
> + }
> + spin_unlock_irqrestore(&timelines->lock, flags);
> +
> + list_for_each_entry_safe(tl, tn, &free, link)
> + __intel_timeline_free(&tl->kref);
> +
> + return active_count ? timeout : 0;
> +}
> +
> +int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
> +{
> + /* If the device is asleep, we have no requests outstanding */
> + if (!intel_gt_pm_is_awake(gt))
> + return 0;
> +
> + while ((timeout = intel_gt_retire_requests_timeout(gt, timeout)) > 0) {
> + cond_resched();
> + if (signal_pending(current))
> + return -EINTR;
> + }
> +
> + return timeout;
> +}
> +
> +static void retire_work_handler(struct work_struct *work)
> +{
> + struct intel_gt *gt =
> + container_of(work, typeof(*gt), requests.retire_work.work);
> +
> + intel_gt_retire_requests(gt);
> + schedule_delayed_work(&gt->requests.retire_work,
> + round_jiffies_up_relative(HZ));
> +}
> +
> +void intel_gt_init_requests(struct intel_gt *gt)
> +{
> + INIT_DELAYED_WORK(&gt->requests.retire_work, retire_work_handler);
> +}
> +
> +void intel_gt_park_requests(struct intel_gt *gt)
> +{
> + cancel_delayed_work(&gt->requests.retire_work);
> +}
> +
> +void intel_gt_unpark_requests(struct intel_gt *gt)
> +{
> + schedule_delayed_work(&gt->requests.retire_work,
> + round_jiffies_up_relative(HZ));
> +}
> diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.h b/drivers/gpu/drm/i915/gt/intel_gt_requests.h
> new file mode 100644
> index 000000000000..bd31cbce47e0
> --- /dev/null
> +++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.h
> @@ -0,0 +1,24 @@
> +/*
> + * SPDX-License-Identifier: MIT
> + *
> + * Copyright © 2019 Intel Corporation
> + */
> +
> +#ifndef INTEL_GT_REQUESTS_H
> +#define INTEL_GT_REQUESTS_H
> +
> +struct intel_gt;
> +
> +long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout);
> +static inline void intel_gt_retire_requests(struct intel_gt *gt)
> +{
> + intel_gt_retire_requests_timeout(gt, 0);
> +}
> +
> +int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);
> +
> +void intel_gt_init_requests(struct intel_gt *gt);
> +void intel_gt_park_requests(struct intel_gt *gt);
> +void intel_gt_unpark_requests(struct intel_gt *gt);
> +
> +#endif /* INTEL_GT_REQUESTS_H */
> diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
> index 7134f1319bbe..802f516a3430 100644
> --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
> +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
> @@ -50,6 +50,17 @@ struct intel_gt {
> struct list_head hwsp_free_list;
> } timelines;
>
> + struct intel_gt_requests {
> + /**
> + * We leave the user IRQ off as much as possible,
> + * but this means that requests will finish and never
> + * be retired once the system goes idle. Set a timer to
> + * fire periodically while the ring is running. When it
> + * fires, go retire requests.
> + */
> + struct delayed_work retire_work;
> + } requests;
> +
> struct intel_wakeref wakeref;
> atomic_t user_wakeref;
>
> diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
> index 16abfabf08c7..d6df40cdc8a6 100644
> --- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
> +++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
> @@ -8,6 +8,7 @@
>
> #include "intel_engine_pm.h"
> #include "intel_gt.h"
> +#include "intel_gt_requests.h"
>
> #include "../selftests/i915_random.h"
> #include "../i915_selftest.h"
> @@ -641,6 +642,7 @@ static int live_hwsp_alternate(void *arg)
> static int live_hwsp_wrap(void *arg)
> {
> struct drm_i915_private *i915 = arg;
> + struct intel_gt *gt = &i915->gt;
> struct intel_engine_cs *engine;
> struct intel_timeline *tl;
> enum intel_engine_id id;
> @@ -651,7 +653,7 @@ static int live_hwsp_wrap(void *arg)
> * foreign GPU references.
> */
>
> - tl = intel_timeline_create(&i915->gt, NULL);
> + tl = intel_timeline_create(gt, NULL);
> if (IS_ERR(tl))
> return PTR_ERR(tl);
>
> @@ -662,7 +664,7 @@ static int live_hwsp_wrap(void *arg)
> if (err)
> goto out_free;
>
> - for_each_engine(engine, i915, id) {
> + for_each_engine(engine, gt->i915, id) {
> const u32 *hwsp_seqno[2];
> struct i915_request *rq;
> u32 seqno[2];
> @@ -734,7 +736,7 @@ static int live_hwsp_wrap(void *arg)
> goto out;
> }
>
> - i915_retire_requests(i915); /* recycle HWSP */
> + intel_gt_retire_requests(gt); /* recycle HWSP */
> }
>
> out:
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index 5888a658e2b7..2afc41e43b6e 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -41,6 +41,7 @@
>
> #include "gem/i915_gem_context.h"
> #include "gt/intel_gt_pm.h"
> +#include "gt/intel_gt_requests.h"
> #include "gt/intel_reset.h"
> #include "gt/intel_rc6.h"
> #include "gt/uc/intel_guc_submission.h"
> @@ -3621,33 +3622,33 @@ static int
> i915_drop_caches_set(void *data, u64 val)
> {
> struct drm_i915_private *i915 = data;
> + struct intel_gt *gt = &i915->gt;
> int ret;
>
> DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
> val, val & DROP_ALL);
>
> if (val & DROP_RESET_ACTIVE &&
> - wait_for(intel_engines_are_idle(&i915->gt),
> - I915_IDLE_ENGINES_TIMEOUT))
> - intel_gt_set_wedged(&i915->gt);
> + wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT))
> + intel_gt_set_wedged(gt);
>
> if (val & DROP_RETIRE)
> - i915_retire_requests(i915);
> + intel_gt_retire_requests(gt);
>
> if (val & (DROP_IDLE | DROP_ACTIVE)) {
> - ret = i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT);
> + ret = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
> if (ret)
> return ret;
> }
>
> if (val & DROP_IDLE) {
> - ret = intel_gt_pm_wait_for_idle(&i915->gt);
> + ret = intel_gt_pm_wait_for_idle(gt);
> if (ret)
> return ret;
> }
>
> - if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(&i915->gt))
> - intel_gt_handle_error(&i915->gt, ALL_ENGINES, 0, NULL);
> + if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt))
> + intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL);
>
> fs_reclaim_acquire(GFP_KERNEL);
> if (val & DROP_BOUND)
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 44f3463ff9f1..cb63b2bd0ce8 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -1710,15 +1710,6 @@ struct drm_i915_private {
>
> struct {
> struct notifier_block pm_notifier;
> -
> - /**
> - * We leave the user IRQ off as much as possible,
> - * but this means that requests will finish and never
> - * be retired once the system goes idle. Set a timer to
> - * fire periodically while the ring is running. When it
> - * fires, go retire requests.
> - */
> - struct delayed_work retire_work;
> } gem;
>
> /* For i945gm vblank irq vs. C3 workaround */
> @@ -2321,7 +2312,6 @@ void i915_gem_driver_register(struct drm_i915_private *i915);
> void i915_gem_driver_unregister(struct drm_i915_private *i915);
> void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
> void i915_gem_driver_release(struct drm_i915_private *dev_priv);
> -int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, long timeout);
> void i915_gem_suspend(struct drm_i915_private *dev_priv);
> void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
> void i915_gem_resume(struct drm_i915_private *dev_priv);
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 7c82fc39f655..5a664bdead8c 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -883,23 +883,6 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
> }
> }
>
> -int i915_gem_wait_for_idle(struct drm_i915_private *i915, long timeout)
> -{
> - struct intel_gt *gt = &i915->gt;
> -
> - /* If the device is asleep, we have no requests outstanding */
> - if (!intel_gt_pm_is_awake(gt))
> - return 0;
> -
> - while ((timeout = i915_retire_requests_timeout(i915, timeout)) > 0) {
> - cond_resched();
> - if (signal_pending(current))
> - return -EINTR;
> - }
> -
> - return timeout;
> -}
> -
> struct i915_vma *
> i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
> const struct i915_ggtt_view *view,
> diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
> index 0a412f6d01d7..7e62c310290f 100644
> --- a/drivers/gpu/drm/i915/i915_gem_evict.c
> +++ b/drivers/gpu/drm/i915/i915_gem_evict.c
> @@ -29,6 +29,7 @@
> #include <drm/i915_drm.h>
>
> #include "gem/i915_gem_context.h"
> +#include "gt/intel_gt_requests.h"
>
> #include "i915_drv.h"
> #include "i915_trace.h"
> @@ -37,7 +38,7 @@ I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
> bool fail_if_busy:1;
> } igt_evict_ctl;)
>
> -static int ggtt_flush(struct drm_i915_private *i915)
> +static int ggtt_flush(struct intel_gt *gt)
> {
> /*
> * Not everything in the GGTT is tracked via vma (otherwise we
> @@ -46,7 +47,7 @@ static int ggtt_flush(struct drm_i915_private *i915)
> * the hopes that we can then remove contexts and the like only
> * bound by their active reference.
> */
> - return i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT);
> + return intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
> }
>
> static bool
> @@ -92,7 +93,6 @@ i915_gem_evict_something(struct i915_address_space *vm,
> u64 start, u64 end,
> unsigned flags)
> {
> - struct drm_i915_private *dev_priv = vm->i915;
> struct drm_mm_scan scan;
> struct list_head eviction_list;
> struct i915_vma *vma, *next;
> @@ -124,7 +124,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
> min_size, alignment, color,
> start, end, mode);
>
> - i915_retire_requests(vm->i915);
> + intel_gt_retire_requests(vm->gt);
>
> search_again:
> active = NULL;
> @@ -197,7 +197,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
> if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
> return -EBUSY;
>
> - ret = ggtt_flush(dev_priv);
> + ret = ggtt_flush(vm->gt);
> if (ret)
> return ret;
>
> @@ -270,7 +270,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
> * a stray pin (preventing eviction) that can only be resolved by
> * retiring.
> */
> - i915_retire_requests(vm->i915);
> + intel_gt_retire_requests(vm->gt);
>
> if (i915_vm_has_cache_coloring(vm)) {
> /* Expand search to cover neighbouring guard pages (or lack!) */
> @@ -372,7 +372,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
> * switch otherwise is ineffective.
> */
> if (i915_is_ggtt(vm)) {
> - ret = ggtt_flush(vm->i915);
> + ret = ggtt_flush(vm->gt);
> if (ret)
> return ret;
> }
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 082fcf9085a6..1d26634ca597 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -38,6 +38,7 @@
>
> #include "display/intel_frontbuffer.h"
> #include "gt/intel_gt.h"
> +#include "gt/intel_gt_requests.h"
>
> #include "i915_drv.h"
> #include "i915_scatterlist.h"
> @@ -2529,8 +2530,8 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
>
> if (unlikely(ggtt->do_idle_maps)) {
> /* XXX This does not prevent more requests being submitted! */
> - if (i915_retire_requests_timeout(dev_priv,
> - -MAX_SCHEDULE_TIMEOUT)) {
> + if (intel_gt_retire_requests_timeout(ggtt->vm.gt,
> + -MAX_SCHEDULE_TIMEOUT)) {
> DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
> /* Wait a bit, in hopes it avoids the hang */
> udelay(10);
> diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> index 52f7c4e5b644..437f9fc6282e 100644
> --- a/drivers/gpu/drm/i915/i915_request.c
> +++ b/drivers/gpu/drm/i915/i915_request.c
> @@ -216,7 +216,7 @@ static void remove_from_engine(struct i915_request *rq)
> spin_unlock(&locked->active.lock);
> }
>
> -static bool i915_request_retire(struct i915_request *rq)
> +bool i915_request_retire(struct i915_request *rq)
> {
> if (!i915_request_completed(rq))
> return false;
> @@ -1508,68 +1508,6 @@ long i915_request_wait(struct i915_request *rq,
> return timeout;
> }
>
> -long i915_retire_requests_timeout(struct drm_i915_private *i915, long timeout)
> -{
> - struct intel_gt_timelines *timelines = &i915->gt.timelines;
> - struct intel_timeline *tl, *tn;
> - unsigned long active_count = 0;
> - unsigned long flags;
> - bool interruptible;
> - LIST_HEAD(free);
> -
> - interruptible = true;
> - if (timeout < 0)
> - timeout = -timeout, interruptible = false;
> -
> - spin_lock_irqsave(&timelines->lock, flags);
> - list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
> - if (!mutex_trylock(&tl->mutex))
> - continue;
> -
> - intel_timeline_get(tl);
> - GEM_BUG_ON(!tl->active_count);
> - tl->active_count++; /* pin the list element */
> - spin_unlock_irqrestore(&timelines->lock, flags);
> -
> - if (timeout > 0) {
> - struct dma_fence *fence;
> -
> - fence = i915_active_fence_get(&tl->last_request);
> - if (fence) {
> - timeout = dma_fence_wait_timeout(fence,
> - interruptible,
> - timeout);
> - dma_fence_put(fence);
> - }
> - }
> -
> - retire_requests(tl);
> -
> - spin_lock_irqsave(&timelines->lock, flags);
> -
> - /* Resume iteration after dropping lock */
> - list_safe_reset_next(tl, tn, link);
> - if (--tl->active_count)
> - active_count += !!rcu_access_pointer(tl->last_request.fence);
> - else
> - list_del(&tl->link);
> -
> - mutex_unlock(&tl->mutex);
> -
> - /* Defer the final release to after the spinlock */
> - if (refcount_dec_and_test(&tl->kref.refcount)) {
> - GEM_BUG_ON(tl->active_count);
> - list_add(&tl->link, &free);
> - }
> - }
> - spin_unlock_irqrestore(&timelines->lock, flags);
> -
> - list_for_each_entry_safe(tl, tn, &free, link)
> - __intel_timeline_free(&tl->kref);
> -
> - return active_count ? timeout : 0;
> -}
> -
> #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
> #include "selftests/mock_request.c"
> #include "selftests/i915_request.c"
> diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
> index 256b0715180f..6a95242b280d 100644
> --- a/drivers/gpu/drm/i915/i915_request.h
> +++ b/drivers/gpu/drm/i915/i915_request.h
> @@ -250,6 +250,7 @@ struct i915_request *__i915_request_commit(struct i915_request *request);
> void __i915_request_queue(struct i915_request *rq,
> const struct i915_sched_attr *attr);
>
> +bool i915_request_retire(struct i915_request *rq);
> void i915_request_retire_upto(struct i915_request *rq);
>
> static inline struct i915_request *
> @@ -459,10 +460,4 @@ i915_request_active_timeline(struct i915_request *rq)
> lockdep_is_held(&rq->engine->active.lock));
> }
>
> -long i915_retire_requests_timeout(struct drm_i915_private *i915, long timeout);
> -static inline void i915_retire_requests(struct drm_i915_private *i915)
> -{
> - i915_retire_requests_timeout(i915, 0);
> -}
> -
> #endif /* I915_REQUEST_H */
> diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
> index ed496bd6d84f..7b0939e3f007 100644
> --- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
> +++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
> @@ -4,8 +4,8 @@
> * Copyright © 2018 Intel Corporation
> */
>
> -#include "gem/i915_gem_context.h"
> #include "gt/intel_gt.h"
> +#include "gt/intel_gt_requests.h"
>
> #include "i915_drv.h"
> #include "i915_selftest.h"
> @@ -14,11 +14,12 @@
>
> int igt_flush_test(struct drm_i915_private *i915)
> {
> - int ret = intel_gt_is_wedged(&i915->gt) ? -EIO : 0;
> + struct intel_gt *gt = &i915->gt;
> + int ret = intel_gt_is_wedged(gt) ? -EIO : 0;
>
> cond_resched();
>
> - if (i915_gem_wait_for_idle(i915, HZ / 5) == -ETIME) {
> + if (intel_gt_wait_for_idle(gt, HZ / 5) == -ETIME) {
> pr_err("%pS timed out, cancelling all further testing.\n",
> __builtin_return_address(0));
>
> @@ -26,7 +27,7 @@ int igt_flush_test(struct drm_i915_private *i915)
> __builtin_return_address(0));
> GEM_TRACE_DUMP();
>
> - intel_gt_set_wedged(&i915->gt);
> + intel_gt_set_wedged(gt);
> ret = -EIO;
> }
>
> diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.c b/drivers/gpu/drm/i915/selftests/igt_live_test.c
> index eae90f97df6c..810b60100c2c 100644
> --- a/drivers/gpu/drm/i915/selftests/igt_live_test.c
> +++ b/drivers/gpu/drm/i915/selftests/igt_live_test.c
> @@ -4,7 +4,8 @@
> * Copyright © 2018 Intel Corporation
> */
>
> -#include "../i915_drv.h"
> +#include "i915_drv.h"
> +#include "gt/intel_gt_requests.h"
>
> #include "../i915_selftest.h"
> #include "igt_flush_test.h"
> @@ -23,7 +24,7 @@ int igt_live_test_begin(struct igt_live_test *t,
> t->func = func;
> t->name = name;
>
> - err = i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT);
> + err = intel_gt_wait_for_idle(&i915->gt, MAX_SCHEDULE_TIMEOUT);
> if (err) {
> pr_err("%s(%s): failed to idle before, with err=%d!",
> func, name, err);
> diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
> index 3b589bbb2c2d..4e6cde0d4859 100644
> --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
> +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
> @@ -26,6 +26,7 @@
> #include <linux/pm_runtime.h>
>
> #include "gt/intel_gt.h"
> +#include "gt/intel_gt_requests.h"
> #include "gt/mock_engine.h"
>
> #include "mock_request.h"
> @@ -44,7 +45,8 @@ void mock_device_flush(struct drm_i915_private *i915)
> do {
> for_each_engine(engine, i915, id)
> mock_engine_flush(engine);
> - } while (i915_retire_requests_timeout(i915, MAX_SCHEDULE_TIMEOUT));
> + } while (intel_gt_retire_requests_timeout(&i915->gt,
> + MAX_SCHEDULE_TIMEOUT));
> }
>
> static void mock_device_release(struct drm_device *dev)
> @@ -98,10 +100,6 @@ static void release_dev(struct device *dev)
> kfree(pdev);
> }
>
> -static void mock_retire_work_handler(struct work_struct *work)
> -{
> -}
> -
> static int pm_domain_resume(struct device *dev)
> {
> return pm_generic_runtime_resume(dev);
> @@ -181,8 +179,6 @@ struct drm_i915_private *mock_gem_device(void)
>
> mock_init_contexts(i915);
>
> - INIT_DELAYED_WORK(&i915->gem.retire_work, mock_retire_work_handler);
> -
> intel_timelines_init(i915);
>
> mutex_lock(&i915->drm.struct_mutex);
>
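For reference, the call-site mapping this patch introduces is simply the
following (a sketch distilled from the hunks above, not part of the patch
itself; the gt-scoped helpers live in gt/intel_gt_requests.h):

    /* old (device-scoped)                      new (gt-scoped)                                  */
    i915_retire_requests(i915);              -> intel_gt_retire_requests(&i915->gt);
    i915_retire_requests_timeout(i915, t);   -> intel_gt_retire_requests_timeout(&i915->gt, t);
    i915_gem_wait_for_idle(i915, t);         -> intel_gt_wait_for_idle(&i915->gt, t);

Callers that already hold a vm or engine use vm->gt / engine->gt instead of
&i915->gt, as in the eviction and selftest hunks.
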
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
Regards,
Tvrtko