[Intel-gfx] [PATCH 17/21] drm/i915: Drop struct_mutex from around i915_retire_requests()

Tvrtko Ursulin tvrtko.ursulin at linux.intel.com
Tue Sep 24 15:25:29 UTC 2019


On 02/09/2019 05:02, Chris Wilson wrote:
> We don't need to hold struct_mutex now for retiring requests, so drop it
> from i915_retire_requests() and i915_gem_wait_for_idle(), finally
> removing I915_WAIT_LOCKED for good.
> 
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> ---
>   .../gpu/drm/i915/gem/i915_gem_client_blt.c    |   7 +-
>   drivers/gpu/drm/i915/gem/i915_gem_context.c   |  20 +--
>   drivers/gpu/drm/i915/gem/i915_gem_pm.c        |  45 +++----
>   .../i915/gem/selftests/i915_gem_coherency.c   |  40 +++---
>   .../drm/i915/gem/selftests/i915_gem_context.c |   4 +-
>   .../drm/i915/gem/selftests/i915_gem_mman.c    |   6 +-
>   .../i915/gem/selftests/i915_gem_object_blt.c  |   4 -
>   drivers/gpu/drm/i915/gt/selftest_context.c    |   4 +-
>   drivers/gpu/drm/i915/gt/selftest_hangcheck.c  |  89 +++----------
>   drivers/gpu/drm/i915/gt/selftest_lrc.c        |  21 ++-
>   drivers/gpu/drm/i915/gt/selftest_timeline.c   |  91 ++++++-------
>   .../gpu/drm/i915/gt/selftest_workarounds.c    |   6 +-
>   drivers/gpu/drm/i915/i915_debugfs.c           |  42 ++----
>   drivers/gpu/drm/i915/i915_gem.c               |  19 ++-
>   drivers/gpu/drm/i915/i915_request.h           |   7 +-
>   drivers/gpu/drm/i915/selftests/i915_active.c  |   8 +-
>   .../gpu/drm/i915/selftests/i915_gem_evict.c   |   2 +-
>   drivers/gpu/drm/i915/selftests/i915_gem_gtt.c |   4 -
>   drivers/gpu/drm/i915/selftests/i915_request.c | 125 +++++-------------
>   .../gpu/drm/i915/selftests/i915_selftest.c    |   8 +-
>   drivers/gpu/drm/i915/selftests/i915_vma.c     |   4 -
>   .../gpu/drm/i915/selftests/igt_flush_test.c   |  30 ++---
>   .../gpu/drm/i915/selftests/igt_flush_test.h   |   2 +-
>   .../gpu/drm/i915/selftests/igt_live_test.c    |   9 +-
>   .../gpu/drm/i915/selftests/mock_gem_device.c  |   4 -
>   25 files changed, 191 insertions(+), 410 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
> index ace50bb9ee1f..cf2057e515af 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
> @@ -166,7 +166,6 @@ static int move_to_active(struct i915_vma *vma, struct i915_request *rq)
>   static void clear_pages_worker(struct work_struct *work)
>   {
>   	struct clear_pages_work *w = container_of(work, typeof(*w), work);
> -	struct drm_i915_private *i915 = w->ce->engine->i915;
>   	struct drm_i915_gem_object *obj = w->sleeve->vma->obj;
>   	struct i915_vma *vma = w->sleeve->vma;
>   	struct i915_request *rq;
> @@ -184,11 +183,9 @@ static void clear_pages_worker(struct work_struct *work)
>   	obj->read_domains = I915_GEM_GPU_DOMAINS;
>   	obj->write_domain = 0;
>   
> -	/* XXX: we need to kill this */
> -	mutex_lock(&i915->drm.struct_mutex);
>   	err = i915_vma_pin(vma, 0, 0, PIN_USER);
>   	if (unlikely(err))
> -		goto out_unlock;
> +		goto out_signal;
>   
>   	batch = intel_emit_vma_fill_blt(w->ce, vma, w->value);
>   	if (IS_ERR(batch)) {
> @@ -240,8 +237,6 @@ static void clear_pages_worker(struct work_struct *work)
>   	intel_emit_vma_release(w->ce, batch);
>   out_unpin:
>   	i915_vma_unpin(vma);
> -out_unlock:
> -	mutex_unlock(&i915->drm.struct_mutex);
>   out_signal:
>   	if (unlikely(err)) {
>   		dma_fence_set_error(&w->dma, err);
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
> index b8ddcf7899a1..3452f1497094 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
> @@ -1161,8 +1161,7 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
>   }
>   
>   static int
> -__intel_context_reconfigure_sseu(struct intel_context *ce,
> -				 struct intel_sseu sseu)
> +intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
>   {
>   	int ret;
>   
> @@ -1185,23 +1184,6 @@ __intel_context_reconfigure_sseu(struct intel_context *ce,
>   	return ret;
>   }
>   
> -static int
> -intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
> -{
> -	struct drm_i915_private *i915 = ce->engine->i915;
> -	int ret;
> -
> -	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
> -	if (ret)
> -		return ret;
> -
> -	ret = __intel_context_reconfigure_sseu(ce, sseu);
> -
> -	mutex_unlock(&i915->drm.struct_mutex);
> -
> -	return ret;
> -}
> -
>   static int
>   user_to_context_sseu(struct drm_i915_private *i915,
>   		     const struct drm_i915_gem_context_param_sseu *user,
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
> index 6e4cc177cc7a..fec0b410d1d9 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
> @@ -48,11 +48,7 @@ static void retire_work_handler(struct work_struct *work)
>   	struct drm_i915_private *i915 =
>   		container_of(work, typeof(*i915), gem.retire_work.work);
>   
> -	/* Come back later if the device is busy... */
> -	if (mutex_trylock(&i915->drm.struct_mutex)) {
> -		i915_retire_requests(i915);
> -		mutex_unlock(&i915->drm.struct_mutex);
> -	}
> +	i915_retire_requests(i915);
>   
>   	queue_delayed_work(i915->wq,
>   			   &i915->gem.retire_work,
> @@ -86,26 +82,23 @@ static bool switch_to_kernel_context_sync(struct intel_gt *gt)
>   {
>   	bool result = !intel_gt_is_wedged(gt);
>   
> -	do {
> -		if (i915_gem_wait_for_idle(gt->i915,
> -					   I915_WAIT_LOCKED |
> -					   I915_WAIT_FOR_IDLE_BOOST,
> -					   I915_GEM_IDLE_TIMEOUT) == -ETIME) {
> -			/* XXX hide warning from gem_eio */
> -			if (i915_modparams.reset) {
> -				dev_err(gt->i915->drm.dev,
> -					"Failed to idle engines, declaring wedged!\n");
> -				GEM_TRACE_DUMP();
> -			}
> -
> -			/*
> -			 * Forcibly cancel outstanding work and leave
> -			 * the gpu quiet.
> -			 */
> -			intel_gt_set_wedged(gt);
> -			result = false;
> +	if (i915_gem_wait_for_idle(gt->i915,
> +				   I915_WAIT_FOR_IDLE_BOOST,
> +				   I915_GEM_IDLE_TIMEOUT) == -ETIME) {
> +		/* XXX hide warning from gem_eio */
> +		if (i915_modparams.reset) {
> +			dev_err(gt->i915->drm.dev,
> +				"Failed to idle engines, declaring wedged!\n");
> +			GEM_TRACE_DUMP();
>   		}
> -	} while (i915_retire_requests(gt->i915) && result);
> +
> +		/*
> +		 * Forcibly cancel outstanding work and leave
> +		 * the gpu quiet.
> +		 */
> +		intel_gt_set_wedged(gt);
> +		result = false;
> +	}
>   
>   	if (intel_gt_pm_wait_for_idle(gt))
>   		result = false;
> @@ -125,8 +118,6 @@ void i915_gem_suspend(struct drm_i915_private *i915)
>   	intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0);
>   	flush_workqueue(i915->wq);
>   
> -	mutex_lock(&i915->drm.struct_mutex);
> -
>   	/*
>   	 * We have to flush all the executing contexts to main memory so
>   	 * that they can saved in the hibernation image. To ensure the last
> @@ -138,8 +129,6 @@ void i915_gem_suspend(struct drm_i915_private *i915)
>   	 */
>   	switch_to_kernel_context_sync(&i915->gt);
>   
> -	mutex_unlock(&i915->drm.struct_mutex);
> -
>   	cancel_delayed_work_sync(&i915->gt.hangcheck.work);
>   
>   	i915_gem_drain_freed_objects(i915);
> diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
> index 0ff7a89aadca..549810f70aeb 100644
> --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
> +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
> @@ -7,6 +7,7 @@
>   #include <linux/prime_numbers.h>
>   
>   #include "gt/intel_gt.h"
> +#include "gt/intel_gt_pm.h"
>   
>   #include "i915_selftest.h"
>   #include "selftests/i915_random.h"
> @@ -78,7 +79,7 @@ static int gtt_set(struct drm_i915_gem_object *obj,
>   {
>   	struct i915_vma *vma;
>   	u32 __iomem *map;
> -	int err;
> +	int err = 0;
>   
>   	i915_gem_object_lock(obj);
>   	err = i915_gem_object_set_to_gtt_domain(obj, true);
> @@ -90,15 +91,21 @@ static int gtt_set(struct drm_i915_gem_object *obj,
>   	if (IS_ERR(vma))
>   		return PTR_ERR(vma);
>   
> +	intel_gt_pm_get(vma->vm->gt);
> +
>   	map = i915_vma_pin_iomap(vma);
>   	i915_vma_unpin(vma);
> -	if (IS_ERR(map))
> -		return PTR_ERR(map);
> +	if (IS_ERR(map)) {
> +		err = PTR_ERR(map);
> +		goto out_rpm;
> +	}
>   
>   	iowrite32(v, &map[offset / sizeof(*map)]);
>   	i915_vma_unpin_iomap(vma);
>   
> -	return 0;
> +out_rpm:
> +	intel_gt_pm_put(vma->vm->gt);
> +	return err;
>   }
>   
>   static int gtt_get(struct drm_i915_gem_object *obj,
> @@ -107,7 +114,7 @@ static int gtt_get(struct drm_i915_gem_object *obj,
>   {
>   	struct i915_vma *vma;
>   	u32 __iomem *map;
> -	int err;
> +	int err = 0;
>   
>   	i915_gem_object_lock(obj);
>   	err = i915_gem_object_set_to_gtt_domain(obj, false);
> @@ -119,15 +126,21 @@ static int gtt_get(struct drm_i915_gem_object *obj,
>   	if (IS_ERR(vma))
>   		return PTR_ERR(vma);
>   
> +	intel_gt_pm_get(vma->vm->gt);
> +
>   	map = i915_vma_pin_iomap(vma);
>   	i915_vma_unpin(vma);
> -	if (IS_ERR(map))
> -		return PTR_ERR(map);
> +	if (IS_ERR(map)) {
> +		err = PTR_ERR(map);
> +		goto out_rpm;
> +	}
>   
>   	*v = ioread32(&map[offset / sizeof(*map)]);
>   	i915_vma_unpin_iomap(vma);
>   
> -	return 0;
> +out_rpm:
> +	intel_gt_pm_put(vma->vm->gt);
> +	return err;
>   }
>   
>   static int wc_set(struct drm_i915_gem_object *obj,
> @@ -280,7 +293,6 @@ static int igt_gem_coherency(void *arg)
>   	struct drm_i915_private *i915 = arg;
>   	const struct igt_coherency_mode *read, *write, *over;
>   	struct drm_i915_gem_object *obj;
> -	intel_wakeref_t wakeref;
>   	unsigned long count, n;
>   	u32 *offsets, *values;
>   	int err = 0;
> @@ -299,8 +311,6 @@ static int igt_gem_coherency(void *arg)
>   
>   	values = offsets + ncachelines;
>   
> -	mutex_lock(&i915->drm.struct_mutex);
> -	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
>   	for (over = igt_coherency_mode; over->name; over++) {
>   		if (!over->set)
>   			continue;
> @@ -326,7 +336,7 @@ static int igt_gem_coherency(void *arg)
>   					obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
>   					if (IS_ERR(obj)) {
>   						err = PTR_ERR(obj);
> -						goto unlock;
> +						goto free;
>   					}
>   
>   					i915_random_reorder(offsets, ncachelines, &prng);
> @@ -377,15 +387,13 @@ static int igt_gem_coherency(void *arg)
>   			}
>   		}
>   	}
> -unlock:
> -	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
> -	mutex_unlock(&i915->drm.struct_mutex);
> +free:
>   	kfree(offsets);
>   	return err;
>   
>   put_object:
>   	i915_gem_object_put(obj);
> -	goto unlock;
> +	goto free;
>   }
>   
>   int i915_gem_coherency_live_selftests(struct drm_i915_private *i915)
> diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
> index aa67c02ba98c..b87e35a713b8 100644
> --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
> @@ -949,7 +949,7 @@ __sseu_test(const char *name,
>   	if (ret)
>   		return ret;
>   
> -	ret = __intel_context_reconfigure_sseu(ce, sseu);
> +	ret = intel_context_reconfigure_sseu(ce, sseu);
>   	if (ret)
>   		goto out_spin;
>   
> @@ -1053,7 +1053,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
>   		goto out_fail;
>   
>   out_fail:
> -	if (igt_flush_test(i915, I915_WAIT_LOCKED))
> +	if (igt_flush_test(i915))
>   		ret = -EIO;
>   
>   	intel_context_unpin(ce);
> diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
> index 9c217dfe96a9..39c01bc4eb51 100644
> --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
> +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
> @@ -393,12 +393,8 @@ static void disable_retire_worker(struct drm_i915_private *i915)
>   
>   static void restore_retire_worker(struct drm_i915_private *i915)
>   {
> +	igt_flush_test(i915);
>   	intel_gt_pm_put(&i915->gt);
> -
> -	mutex_lock(&i915->drm.struct_mutex);
> -	igt_flush_test(i915, I915_WAIT_LOCKED);
> -	mutex_unlock(&i915->drm.struct_mutex);
> -
>   	i915_gem_driver_register__shrinker(i915);
>   }
>   
> diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
> index c21d747e7d05..9ec55b3a3815 100644
> --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
> +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
> @@ -65,9 +65,7 @@ static int igt_fill_blt(void *arg)
>   		if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
>   			obj->cache_dirty = true;
>   
> -		mutex_lock(&i915->drm.struct_mutex);
>   		err = i915_gem_object_fill_blt(obj, ce, val);
> -		mutex_unlock(&i915->drm.struct_mutex);
>   		if (err)
>   			goto err_unpin;
>   
> @@ -166,9 +164,7 @@ static int igt_copy_blt(void *arg)
>   		if (!(dst->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
>   			dst->cache_dirty = true;
>   
> -		mutex_lock(&i915->drm.struct_mutex);
>   		err = i915_gem_object_copy_blt(src, dst, ce);
> -		mutex_unlock(&i915->drm.struct_mutex);
>   		if (err)
>   			goto err_unpin;
>   
> diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
> index 1420533e8fd5..883739354b07 100644
> --- a/drivers/gpu/drm/i915/gt/selftest_context.c
> +++ b/drivers/gpu/drm/i915/gt/selftest_context.c
> @@ -312,7 +312,7 @@ static int live_active_context(void *arg)
>   		if (err)
>   			break;
>   
> -		err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
> +		err = igt_flush_test(gt->i915);
>   		if (err)
>   			break;
>   	}
> @@ -425,7 +425,7 @@ static int live_remote_context(void *arg)
>   		if (err)
>   			break;
>   
> -		err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
> +		err = igt_flush_test(gt->i915);
>   		if (err)
>   			break;
>   	}
> diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
> index e53eea1050f8..fc4d02406536 100644
> --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
> +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
> @@ -58,7 +58,9 @@ static int hang_init(struct hang *h, struct intel_gt *gt)
>   	memset(h, 0, sizeof(*h));
>   	h->gt = gt;
>   
> +	mutex_lock(&gt->i915->drm.struct_mutex);
>   	h->ctx = kernel_context(gt->i915);
> +	mutex_unlock(&gt->i915->drm.struct_mutex);
>   	if (IS_ERR(h->ctx))
>   		return PTR_ERR(h->ctx);
>   
> @@ -285,7 +287,7 @@ static void hang_fini(struct hang *h)
>   
>   	kernel_context_close(h->ctx);
>   
> -	igt_flush_test(h->gt->i915, I915_WAIT_LOCKED);
> +	igt_flush_test(h->gt->i915);
>   }
>   
>   static bool wait_until_running(struct hang *h, struct i915_request *rq)
> @@ -309,10 +311,9 @@ static int igt_hang_sanitycheck(void *arg)
>   
>   	/* Basic check that we can execute our hanging batch */
>   
> -	mutex_lock(&gt->i915->drm.struct_mutex);
>   	err = hang_init(&h, gt);
>   	if (err)
> -		goto unlock;
> +		return err;
>   
>   	for_each_engine(engine, gt->i915, id) {
>   		struct intel_wedge_me w;
> @@ -355,8 +356,6 @@ static int igt_hang_sanitycheck(void *arg)
>   
>   fini:
>   	hang_fini(&h);
> -unlock:
> -	mutex_unlock(&gt->i915->drm.struct_mutex);
>   	return err;
>   }
>   
> @@ -395,8 +394,6 @@ static int igt_reset_nop(void *arg)
>   	reset_count = i915_reset_count(global);
>   	count = 0;
>   	do {
> -		mutex_lock(&gt->i915->drm.struct_mutex);
> -
>   		for_each_engine(engine, gt->i915, id) {
>   			int i;
>   
> @@ -417,7 +414,6 @@ static int igt_reset_nop(void *arg)
>   		intel_gt_reset(gt, ALL_ENGINES, NULL);
>   		igt_global_reset_unlock(gt);
>   
> -		mutex_unlock(&gt->i915->drm.struct_mutex);
>   		if (intel_gt_is_wedged(gt)) {
>   			err = -EIO;
>   			break;
> @@ -429,16 +425,13 @@ static int igt_reset_nop(void *arg)
>   			break;
>   		}
>   
> -		err = igt_flush_test(gt->i915, 0);
> +		err = igt_flush_test(gt->i915);
>   		if (err)
>   			break;
>   	} while (time_before(jiffies, end_time));
>   	pr_info("%s: %d resets\n", __func__, count);
>   
> -	mutex_lock(&gt->i915->drm.struct_mutex);
> -	err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
> -	mutex_unlock(&gt->i915->drm.struct_mutex);
> -
> +	err = igt_flush_test(gt->i915);
>   out:
>   	mock_file_free(gt->i915, file);
>   	if (intel_gt_is_wedged(gt))
> @@ -494,7 +487,6 @@ static int igt_reset_nop_engine(void *arg)
>   				break;
>   			}
>   
> -			mutex_lock(&gt->i915->drm.struct_mutex);
>   			for (i = 0; i < 16; i++) {
>   				struct i915_request *rq;
>   
> @@ -507,7 +499,6 @@ static int igt_reset_nop_engine(void *arg)
>   				i915_request_add(rq);
>   			}
>   			err = intel_engine_reset(engine, NULL);
> -			mutex_unlock(&gt->i915->drm.struct_mutex);
>   			if (err) {
>   				pr_err("i915_reset_engine failed\n");
>   				break;
> @@ -533,15 +524,12 @@ static int igt_reset_nop_engine(void *arg)
>   		if (err)
>   			break;
>   
> -		err = igt_flush_test(gt->i915, 0);
> +		err = igt_flush_test(gt->i915);
>   		if (err)
>   			break;
>   	}
>   
> -	mutex_lock(&gt->i915->drm.struct_mutex);
> -	err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
> -	mutex_unlock(&gt->i915->drm.struct_mutex);
> -
> +	err = igt_flush_test(gt->i915);
>   out:
>   	mock_file_free(gt->i915, file);
>   	if (intel_gt_is_wedged(gt))
> @@ -563,9 +551,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
>   		return 0;
>   
>   	if (active) {
> -		mutex_lock(&gt->i915->drm.struct_mutex);
>   		err = hang_init(&h, gt);
> -		mutex_unlock(&gt->i915->drm.struct_mutex);
>   		if (err)
>   			return err;
>   	}
> @@ -593,17 +579,14 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
>   			if (active) {
>   				struct i915_request *rq;
>   
> -				mutex_lock(&gt->i915->drm.struct_mutex);
>   				rq = hang_create_request(&h, engine);
>   				if (IS_ERR(rq)) {
>   					err = PTR_ERR(rq);
> -					mutex_unlock(&gt->i915->drm.struct_mutex);
>   					break;
>   				}
>   
>   				i915_request_get(rq);
>   				i915_request_add(rq);
> -				mutex_unlock(&gt->i915->drm.struct_mutex);
>   
>   				if (!wait_until_running(&h, rq)) {
>   					struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
> @@ -647,7 +630,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
>   		if (err)
>   			break;
>   
> -		err = igt_flush_test(gt->i915, 0);
> +		err = igt_flush_test(gt->i915);
>   		if (err)
>   			break;
>   	}
> @@ -655,11 +638,8 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
>   	if (intel_gt_is_wedged(gt))
>   		err = -EIO;
>   
> -	if (active) {
> -		mutex_lock(&gt->i915->drm.struct_mutex);
> +	if (active)
>   		hang_fini(&h);
> -		mutex_unlock(&gt->i915->drm.struct_mutex);
> -	}
>   
>   	return err;
>   }
> @@ -741,10 +721,8 @@ static int active_engine(void *data)
>   		struct i915_request *old = rq[idx];
>   		struct i915_request *new;
>   
> -		mutex_lock(&engine->i915->drm.struct_mutex);
>   		new = igt_request_alloc(ctx[idx], engine);
>   		if (IS_ERR(new)) {
> -			mutex_unlock(&engine->i915->drm.struct_mutex);
>   			err = PTR_ERR(new);
>   			break;
>   		}
> @@ -755,7 +733,6 @@ static int active_engine(void *data)
>   
>   		rq[idx] = i915_request_get(new);
>   		i915_request_add(new);
> -		mutex_unlock(&engine->i915->drm.struct_mutex);
>   
>   		err = active_request_put(old);
>   		if (err)
> @@ -795,9 +772,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
>   		return 0;
>   
>   	if (flags & TEST_ACTIVE) {
> -		mutex_lock(&gt->i915->drm.struct_mutex);
>   		err = hang_init(&h, gt);
> -		mutex_unlock(&gt->i915->drm.struct_mutex);
>   		if (err)
>   			return err;
>   
> @@ -855,17 +830,14 @@ static int __igt_reset_engines(struct intel_gt *gt,
>   			struct i915_request *rq = NULL;
>   
>   			if (flags & TEST_ACTIVE) {
> -				mutex_lock(&gt->i915->drm.struct_mutex);
>   				rq = hang_create_request(&h, engine);
>   				if (IS_ERR(rq)) {
>   					err = PTR_ERR(rq);
> -					mutex_unlock(&gt->i915->drm.struct_mutex);
>   					break;
>   				}
>   
>   				i915_request_get(rq);
>   				i915_request_add(rq);
> -				mutex_unlock(&gt->i915->drm.struct_mutex);
>   
>   				if (!wait_until_running(&h, rq)) {
>   					struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
> @@ -977,9 +949,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
>   		if (err)
>   			break;
>   
> -		mutex_lock(&gt->i915->drm.struct_mutex);
> -		err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
> -		mutex_unlock(&gt->i915->drm.struct_mutex);
> +		err = igt_flush_test(gt->i915);
>   		if (err)
>   			break;
>   	}
> @@ -987,11 +957,8 @@ static int __igt_reset_engines(struct intel_gt *gt,
>   	if (intel_gt_is_wedged(gt))
>   		err = -EIO;
>   
> -	if (flags & TEST_ACTIVE) {
> -		mutex_lock(&gt->i915->drm.struct_mutex);
> +	if (flags & TEST_ACTIVE)
>   		hang_fini(&h);
> -		mutex_unlock(&gt->i915->drm.struct_mutex);
> -	}
>   
>   	return err;
>   }
> @@ -1061,7 +1028,6 @@ static int igt_reset_wait(void *arg)
>   
>   	igt_global_reset_lock(gt);
>   
> -	mutex_lock(&gt->i915->drm.struct_mutex);
>   	err = hang_init(&h, gt);
>   	if (err)
>   		goto unlock;
> @@ -1109,7 +1075,6 @@ static int igt_reset_wait(void *arg)
>   fini:
>   	hang_fini(&h);
>   unlock:
> -	mutex_unlock(&gt->i915->drm.struct_mutex);
>   	igt_global_reset_unlock(gt);
>   
>   	if (intel_gt_is_wedged(gt))
> @@ -1189,10 +1154,9 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
>   
>   	/* Check that we can recover an unbind stuck on a hanging request */
>   
> -	mutex_lock(&gt->i915->drm.struct_mutex);
>   	err = hang_init(&h, gt);
>   	if (err)
> -		goto unlock;
> +		return err;
>   
>   	obj = i915_gem_object_create_internal(gt->i915, SZ_1M);
>   	if (IS_ERR(obj)) {
> @@ -1255,8 +1219,6 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
>   	if (err)
>   		goto out_rq;
>   
> -	mutex_unlock(&gt->i915->drm.struct_mutex);
> -
>   	if (!wait_until_running(&h, rq)) {
>   		struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
>   
> @@ -1305,16 +1267,12 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
>   		put_task_struct(tsk);
>   	}
>   
> -	mutex_lock(&gt->i915->drm.struct_mutex);
>   out_rq:
>   	i915_request_put(rq);
>   out_obj:
>   	i915_gem_object_put(obj);
>   fini:
>   	hang_fini(&h);
> -unlock:
> -	mutex_unlock(&gt->i915->drm.struct_mutex);
> -
>   	if (intel_gt_is_wedged(gt))
>   		return -EIO;
>   
> @@ -1396,7 +1354,6 @@ static int igt_reset_queue(void *arg)
>   
>   	igt_global_reset_lock(gt);
>   
> -	mutex_lock(&gt->i915->drm.struct_mutex);
>   	err = hang_init(&h, gt);
>   	if (err)
>   		goto unlock;
> @@ -1511,7 +1468,7 @@ static int igt_reset_queue(void *arg)
>   
>   		i915_request_put(prev);
>   
> -		err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
> +		err = igt_flush_test(gt->i915);
>   		if (err)
>   			break;
>   	}
> @@ -1519,7 +1476,6 @@ static int igt_reset_queue(void *arg)
>   fini:
>   	hang_fini(&h);
>   unlock:
> -	mutex_unlock(&gt->i915->drm.struct_mutex);
>   	igt_global_reset_unlock(gt);
>   
>   	if (intel_gt_is_wedged(gt))
> @@ -1546,11 +1502,9 @@ static int igt_handle_error(void *arg)
>   	if (!engine || !intel_engine_can_store_dword(engine))
>   		return 0;
>   
> -	mutex_lock(&gt->i915->drm.struct_mutex);
> -
>   	err = hang_init(&h, gt);
>   	if (err)
> -		goto err_unlock;
> +		return err;
>   
>   	rq = hang_create_request(&h, engine);
>   	if (IS_ERR(rq)) {
> @@ -1574,8 +1528,6 @@ static int igt_handle_error(void *arg)
>   		goto err_request;
>   	}
>   
> -	mutex_unlock(&gt->i915->drm.struct_mutex);
> -
>   	/* Temporarily disable error capture */
>   	error = xchg(&global->first_error, (void *)-1);
>   
> @@ -1583,8 +1535,6 @@ static int igt_handle_error(void *arg)
>   
>   	xchg(&global->first_error, error);
>   
> -	mutex_lock(&gt->i915->drm.struct_mutex);
> -
>   	if (rq->fence.error != -EIO) {
>   		pr_err("Guilty request not identified!\n");
>   		err = -EINVAL;
> @@ -1595,8 +1545,6 @@ static int igt_handle_error(void *arg)
>   	i915_request_put(rq);
>   err_fini:
>   	hang_fini(&h);
> -err_unlock:
> -	mutex_unlock(&gt->i915->drm.struct_mutex);
>   	return err;
>   }
>   
> @@ -1689,7 +1637,6 @@ static int igt_reset_engines_atomic(void *arg)
>   		return 0;
>   
>   	igt_global_reset_lock(gt);
> -	mutex_lock(&gt->i915->drm.struct_mutex);
>   
>   	/* Flush any requests before we get started and check basics */
>   	if (!igt_force_reset(gt))
> @@ -1709,9 +1656,7 @@ static int igt_reset_engines_atomic(void *arg)
>   out:
>   	/* As we poke around the guts, do a full reset before continuing. */
>   	igt_force_reset(gt);
> -
>   unlock:
> -	mutex_unlock(&gt->i915->drm.struct_mutex);
>   	igt_global_reset_unlock(gt);
>   
>   	return err;
> @@ -1751,10 +1696,6 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
>   
>   	err = intel_gt_live_subtests(tests, gt);
>   
> -	mutex_lock(&gt->i915->drm.struct_mutex);
> -	igt_flush_test(gt->i915, I915_WAIT_LOCKED);
> -	mutex_unlock(&gt->i915->drm.struct_mutex);
> -
>   	i915_modparams.enable_hangcheck = saved_hangcheck;
>   	intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
>   
> diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
> index aca1b3a9c5de..222a7375c787 100644
> --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
> +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
> @@ -61,7 +61,7 @@ static int live_sanitycheck(void *arg)
>   		}
>   
>   		igt_spinner_end(&spin);
> -		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
> +		if (igt_flush_test(i915)) {
>   			err = -EIO;
>   			goto err_ctx;
>   		}
> @@ -206,8 +206,7 @@ slice_semaphore_queue(struct intel_engine_cs *outer,
>   	if (err)
>   		goto out;
>   
> -	if (i915_request_wait(head,
> -			      I915_WAIT_LOCKED,
> +	if (i915_request_wait(head, 0,
>   			      2 * RUNTIME_INFO(outer->i915)->num_engines * (count + 2) * (count + 3)) < 0) {
>   		pr_err("Failed to slice along semaphore chain of length (%d, %d)!\n",
>   		       count, n);
> @@ -279,7 +278,7 @@ static int live_timeslice_preempt(void *arg)
>   			if (err)
>   				goto err_pin;
>   
> -			if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
> +			if (igt_flush_test(i915)) {
>   				err = -EIO;
>   				goto err_pin;
>   			}
> @@ -832,7 +831,7 @@ static int live_nopreempt(void *arg)
>   			goto err_wedged;
>   		}
>   
> -		if (igt_flush_test(i915, I915_WAIT_LOCKED))
> +		if (igt_flush_test(i915))
>   			goto err_wedged;
>   	}
>   
> @@ -948,7 +947,7 @@ static int live_suppress_self_preempt(void *arg)
>   			goto err_client_b;
>   		}
>   
> -		if (igt_flush_test(i915, I915_WAIT_LOCKED))
> +		if (igt_flush_test(i915))
>   			goto err_wedged;
>   	}
>   
> @@ -1109,7 +1108,7 @@ static int live_suppress_wait_preempt(void *arg)
>   			for (i = 0; i < ARRAY_SIZE(client); i++)
>   				igt_spinner_end(&client[i].spin);
>   
> -			if (igt_flush_test(i915, I915_WAIT_LOCKED))
> +			if (igt_flush_test(i915))
>   				goto err_wedged;
>   
>   			if (engine->execlists.preempt_hang.count) {
> @@ -1388,7 +1387,7 @@ static int live_preempt_hang(void *arg)
>   
>   		igt_spinner_end(&spin_hi);
>   		igt_spinner_end(&spin_lo);
> -		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
> +		if (igt_flush_test(i915)) {
>   			err = -EIO;
>   			goto err_ctx_lo;
>   		}
> @@ -1785,7 +1784,7 @@ static int nop_virtual_engine(struct drm_i915_private *i915,
>   		prime, div64_u64(ktime_to_ns(times[1]), prime));
>   
>   out:
> -	if (igt_flush_test(i915, I915_WAIT_LOCKED))
> +	if (igt_flush_test(i915))
>   		err = -EIO;
>   
>   	for (nc = 0; nc < nctx; nc++) {
> @@ -1930,7 +1929,7 @@ static int mask_virtual_engine(struct drm_i915_private *i915,
>   		goto out;
>   
>   out:
> -	if (igt_flush_test(i915, I915_WAIT_LOCKED))
> +	if (igt_flush_test(i915))
>   		err = -EIO;
>   
>   	for (n = 0; n < nsibling; n++)
> @@ -2108,7 +2107,7 @@ static int bond_virtual_engine(struct drm_i915_private *i915,
>   out:
>   	for (n = 0; !IS_ERR(rq[n]); n++)
>   		i915_request_put(rq[n]);
> -	if (igt_flush_test(i915, I915_WAIT_LOCKED))
> +	if (igt_flush_test(i915))
>   		err = -EIO;
>   
>   	kernel_context_close(ctx);
> diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
> index 321481403165..16abfabf08c7 100644
> --- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
> +++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
> @@ -6,7 +6,7 @@
>   
>   #include <linux/prime_numbers.h>
>   
> -#include "gem/i915_gem_pm.h"
> +#include "intel_engine_pm.h"
>   #include "intel_gt.h"
>   
>   #include "../selftests/i915_random.h"
> @@ -136,7 +136,6 @@ static int mock_hwsp_freelist(void *arg)
>   		goto err_put;
>   	}
>   
> -	mutex_lock(&state.i915->drm.struct_mutex);
>   	for (p = phases; p->name; p++) {
>   		pr_debug("%s(%s)\n", __func__, p->name);
>   		for_each_prime_number_from(na, 1, 2 * CACHELINES_PER_PAGE) {
> @@ -149,7 +148,6 @@ static int mock_hwsp_freelist(void *arg)
>   out:
>   	for (na = 0; na < state.max; na++)
>   		__mock_hwsp_record(&state, na, NULL);
> -	mutex_unlock(&state.i915->drm.struct_mutex);
>   	kfree(state.history);
>   err_put:
>   	drm_dev_put(&state.i915->drm);
> @@ -449,8 +447,6 @@ tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value)
>   	struct i915_request *rq;
>   	int err;
>   
> -	lockdep_assert_held(&tl->gt->i915->drm.struct_mutex); /* lazy rq refs */
> -
>   	err = intel_timeline_pin(tl);
>   	if (err) {
>   		rq = ERR_PTR(err);
> @@ -461,10 +457,14 @@ tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value)
>   	if (IS_ERR(rq))
>   		goto out_unpin;
>   
> +	i915_request_get(rq);
> +
>   	err = emit_ggtt_store_dw(rq, tl->hwsp_offset, value);
>   	i915_request_add(rq);
> -	if (err)
> +	if (err) {
> +		i915_request_put(rq);
>   		rq = ERR_PTR(err);
> +	}
>   
>   out_unpin:
>   	intel_timeline_unpin(tl);
> @@ -500,7 +500,6 @@ static int live_hwsp_engine(void *arg)
>   	struct intel_timeline **timelines;
>   	struct intel_engine_cs *engine;
>   	enum intel_engine_id id;
> -	intel_wakeref_t wakeref;
>   	unsigned long count, n;
>   	int err = 0;
>   
> @@ -515,14 +514,13 @@ static int live_hwsp_engine(void *arg)
>   	if (!timelines)
>   		return -ENOMEM;
>   
> -	mutex_lock(&i915->drm.struct_mutex);
> -	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
> -
>   	count = 0;
>   	for_each_engine(engine, i915, id) {
>   		if (!intel_engine_can_store_dword(engine))
>   			continue;
>   
> +		intel_engine_pm_get(engine);
> +
>   		for (n = 0; n < NUM_TIMELINES; n++) {
>   			struct intel_timeline *tl;
>   			struct i915_request *rq;
> @@ -530,22 +528,26 @@ static int live_hwsp_engine(void *arg)
>   			tl = checked_intel_timeline_create(i915);
>   			if (IS_ERR(tl)) {
>   				err = PTR_ERR(tl);
> -				goto out;
> +				break;
>   			}
>   
>   			rq = tl_write(tl, engine, count);
>   			if (IS_ERR(rq)) {
>   				intel_timeline_put(tl);
>   				err = PTR_ERR(rq);
> -				goto out;
> +				break;
>   			}
>   
>   			timelines[count++] = tl;
> +			i915_request_put(rq);

This was a leak until now?

>   		}
> +
> +		intel_engine_pm_put(engine);
> +		if (err)
> +			break;
>   	}
>   
> -out:
> -	if (igt_flush_test(i915, I915_WAIT_LOCKED))
> +	if (igt_flush_test(i915))
>   		err = -EIO;
>   
>   	for (n = 0; n < count; n++) {
> @@ -559,11 +561,7 @@ static int live_hwsp_engine(void *arg)
>   		intel_timeline_put(tl);
>   	}
>   
> -	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
> -	mutex_unlock(&i915->drm.struct_mutex);
> -
>   	kvfree(timelines);
> -
>   	return err;
>   #undef NUM_TIMELINES
>   }
> @@ -575,7 +573,6 @@ static int live_hwsp_alternate(void *arg)
>   	struct intel_timeline **timelines;
>   	struct intel_engine_cs *engine;
>   	enum intel_engine_id id;
> -	intel_wakeref_t wakeref;
>   	unsigned long count, n;
>   	int err = 0;
>   
> @@ -591,9 +588,6 @@ static int live_hwsp_alternate(void *arg)
>   	if (!timelines)
>   		return -ENOMEM;
>   
> -	mutex_lock(&i915->drm.struct_mutex);
> -	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
> -
>   	count = 0;
>   	for (n = 0; n < NUM_TIMELINES; n++) {
>   		for_each_engine(engine, i915, id) {
> @@ -605,11 +599,14 @@ static int live_hwsp_alternate(void *arg)
>   
>   			tl = checked_intel_timeline_create(i915);
>   			if (IS_ERR(tl)) {
> +				intel_engine_pm_put(engine);
>   				err = PTR_ERR(tl);
>   				goto out;
>   			}
>   
> +			intel_engine_pm_get(engine);
>   			rq = tl_write(tl, engine, count);
> +			intel_engine_pm_put(engine);
>   			if (IS_ERR(rq)) {
>   				intel_timeline_put(tl);
>   				err = PTR_ERR(rq);
> @@ -617,11 +614,12 @@ static int live_hwsp_alternate(void *arg)
>   			}
>   
>   			timelines[count++] = tl;
> +			i915_request_put(rq);

And this.

>   		}
>   	}
>   
>   out:
> -	if (igt_flush_test(i915, I915_WAIT_LOCKED))
> +	if (igt_flush_test(i915))
>   		err = -EIO;
>   
>   	for (n = 0; n < count; n++) {
> @@ -635,11 +633,7 @@ static int live_hwsp_alternate(void *arg)
>   		intel_timeline_put(tl);
>   	}
>   
> -	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
> -	mutex_unlock(&i915->drm.struct_mutex);
> -
>   	kvfree(timelines);
> -
>   	return err;
>   #undef NUM_TIMELINES
>   }
> @@ -650,7 +644,6 @@ static int live_hwsp_wrap(void *arg)
>   	struct intel_engine_cs *engine;
>   	struct intel_timeline *tl;
>   	enum intel_engine_id id;
> -	intel_wakeref_t wakeref;
>   	int err = 0;
>   
>   	/*
> @@ -658,14 +651,10 @@ static int live_hwsp_wrap(void *arg)
>   	 * foreign GPU references.
>   	 */
>   
> -	mutex_lock(&i915->drm.struct_mutex);
> -	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
> -
>   	tl = intel_timeline_create(&i915->gt, NULL);
> -	if (IS_ERR(tl)) {
> -		err = PTR_ERR(tl);
> -		goto out_rpm;
> -	}
> +	if (IS_ERR(tl))
> +		return PTR_ERR(tl);
> +
>   	if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
>   		goto out_free;
>   
> @@ -681,7 +670,9 @@ static int live_hwsp_wrap(void *arg)
>   		if (!intel_engine_can_store_dword(engine))
>   			continue;
>   
> +		intel_engine_pm_get(engine);
>   		rq = i915_request_create(engine->kernel_context);
> +		intel_engine_pm_put(engine);
>   		if (IS_ERR(rq)) {
>   			err = PTR_ERR(rq);
>   			goto out;
> @@ -747,16 +738,12 @@ static int live_hwsp_wrap(void *arg)
>   	}
>   

No i915_request_put in this one and I can't see why it would be different.

>   out:
> -	if (igt_flush_test(i915, I915_WAIT_LOCKED))
> +	if (igt_flush_test(i915))
>   		err = -EIO;
>   
>   	intel_timeline_unpin(tl);
>   out_free:
>   	intel_timeline_put(tl);
> -out_rpm:
> -	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
> -	mutex_unlock(&i915->drm.struct_mutex);
> -
>   	return err;
>   }
>   
> @@ -765,7 +752,6 @@ static int live_hwsp_recycle(void *arg)
>   	struct drm_i915_private *i915 = arg;
>   	struct intel_engine_cs *engine;
>   	enum intel_engine_id id;
> -	intel_wakeref_t wakeref;
>   	unsigned long count;
>   	int err = 0;
>   
> @@ -775,9 +761,6 @@ static int live_hwsp_recycle(void *arg)
>   	 * want to confuse ourselves or the GPU.
>   	 */
>   
> -	mutex_lock(&i915->drm.struct_mutex);
> -	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
> -
>   	count = 0;
>   	for_each_engine(engine, i915, id) {
>   		IGT_TIMEOUT(end_time);
> @@ -785,6 +768,8 @@ static int live_hwsp_recycle(void *arg)
>   		if (!intel_engine_can_store_dword(engine))
>   			continue;
>   
> +		intel_engine_pm_get(engine);
> +
>   		do {
>   			struct intel_timeline *tl;
>   			struct i915_request *rq;
> @@ -792,21 +777,22 @@ static int live_hwsp_recycle(void *arg)
>   			tl = checked_intel_timeline_create(i915);
>   			if (IS_ERR(tl)) {
>   				err = PTR_ERR(tl);
> -				goto out;
> +				break;
>   			}
>   
>   			rq = tl_write(tl, engine, count);
>   			if (IS_ERR(rq)) {
>   				intel_timeline_put(tl);
>   				err = PTR_ERR(rq);
> -				goto out;
> +				break;
>   			}
>   
>   			if (i915_request_wait(rq, 0, HZ / 5) < 0) {
>   				pr_err("Wait for timeline writes timed out!\n");
> +				i915_request_put(rq);
>   				intel_timeline_put(tl);
>   				err = -EIO;
> -				goto out;
> +				break;
>   			}
>   
>   			if (*tl->hwsp_seqno != count) {
> @@ -815,17 +801,18 @@ static int live_hwsp_recycle(void *arg)
>   				err = -EINVAL;
>   			}
>   
> +			i915_request_put(rq);
>   			intel_timeline_put(tl);
>   			count++;
>   
>   			if (err)
> -				goto out;
> +				break;
>   		} while (!__igt_timeout(end_time, NULL));
> -	}
>   
> -out:
> -	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
> -	mutex_unlock(&i915->drm.struct_mutex);
> +		intel_engine_pm_put(engine);
> +		if (err)
> +			break;
> +	}
>   
>   	return err;
>   }
> diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
> index 999a98f00494..06351fefbbf3 100644
> --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
> +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
> @@ -676,7 +676,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
>   			break;
>   	}
>   
> -	if (igt_flush_test(ctx->i915, I915_WAIT_LOCKED))
> +	if (igt_flush_test(ctx->i915))
>   		err = -EIO;
>   out_batch:
>   	i915_vma_unpin_and_release(&batch, 0);
> @@ -1090,7 +1090,7 @@ static int live_isolated_whitelist(void *arg)
>   		kernel_context_close(client[i].ctx);
>   	}
>   
> -	if (igt_flush_test(i915, I915_WAIT_LOCKED))
> +	if (igt_flush_test(i915))
>   		err = -EIO;
>   
>   	return err;
> @@ -1248,7 +1248,7 @@ live_engine_reset_workarounds(void *arg)
>   	igt_global_reset_unlock(&i915->gt);
>   	kernel_context_close(ctx);
>   
> -	igt_flush_test(i915, I915_WAIT_LOCKED);
> +	igt_flush_test(i915);
>   
>   	return ret;
>   }
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index 30b0b592e20d..55f0fc03aa3e 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -3601,6 +3601,7 @@ static int
>   i915_drop_caches_set(void *data, u64 val)
>   {
>   	struct drm_i915_private *i915 = data;
> +	int ret;
>   
>   	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
>   		  val, val & DROP_ALL);
> @@ -3610,40 +3611,21 @@ i915_drop_caches_set(void *data, u64 val)
>   		     I915_IDLE_ENGINES_TIMEOUT))
>   		intel_gt_set_wedged(&i915->gt);
>   
> -	/* No need to check and wait for gpu resets, only libdrm auto-restarts
> -	 * on ioctls on -EAGAIN. */
> -	if (val & (DROP_ACTIVE | DROP_IDLE | DROP_RETIRE | DROP_RESET_SEQNO)) {
> -		int ret;
> +	if (val & DROP_RETIRE)
> +		i915_retire_requests(i915);
>   
> -		ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
> +	if (val & (DROP_IDLE | DROP_ACTIVE)) {
> +		ret = i915_gem_wait_for_idle(i915,
> +					     I915_WAIT_INTERRUPTIBLE,
> +					     MAX_SCHEDULE_TIMEOUT);
>   		if (ret)
>   			return ret;
> +	}
>   
> -		/*
> -		 * To finish the flush of the idle_worker, we must complete
> -		 * the switch-to-kernel-context, which requires a double
> -		 * pass through wait_for_idle: first queues the switch,
> -		 * second waits for the switch.
> -		 */
> -		if (ret == 0 && val & (DROP_IDLE | DROP_ACTIVE))
> -			ret = i915_gem_wait_for_idle(i915,
> -						     I915_WAIT_INTERRUPTIBLE |
> -						     I915_WAIT_LOCKED,
> -						     MAX_SCHEDULE_TIMEOUT);
> -
> -		if (ret == 0 && val & DROP_IDLE)
> -			ret = i915_gem_wait_for_idle(i915,
> -						     I915_WAIT_INTERRUPTIBLE |
> -						     I915_WAIT_LOCKED,
> -						     MAX_SCHEDULE_TIMEOUT);
> -
> -		if (val & DROP_RETIRE)
> -			i915_retire_requests(i915);
> -
> -		mutex_unlock(&i915->drm.struct_mutex);
> -
> -		if (ret == 0 && val & DROP_IDLE)
> -			ret = intel_gt_pm_wait_for_idle(&i915->gt);
> +	if (val & DROP_IDLE) {
> +		ret = intel_gt_pm_wait_for_idle(&i915->gt);
> +		if (ret)
> +			return ret;
>   	}
>   
>   	if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(&i915->gt))
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 1aadab1cdd24..225fd22af858 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -951,19 +951,16 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915,
>   	if (!intel_gt_pm_is_awake(&i915->gt))
>   		return 0;
>   
> -	GEM_TRACE("flags=%x (%s), timeout=%ld%s\n",
> -		  flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
> -		  timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "");
> -
> -	timeout = wait_for_timelines(i915, flags, timeout);
> -	if (timeout < 0)
> -		return timeout;
> +	do {
> +		timeout = wait_for_timelines(i915, flags, timeout);
> +		if (timeout < 0)
> +			return timeout;
>   
> -	if (flags & I915_WAIT_LOCKED) {
> -		lockdep_assert_held(&i915->drm.struct_mutex);
> +		cond_resched();
> +		if (signal_pending(current))
> +			return -EINTR;
>   
> -		i915_retire_requests(i915);
> -	}
> +	} while (i915_retire_requests(i915));
>   
>   	return 0;
>   }
> diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
> index 3251d2bdbeea..57a2193c64d1 100644
> --- a/drivers/gpu/drm/i915/i915_request.h
> +++ b/drivers/gpu/drm/i915/i915_request.h
> @@ -308,10 +308,9 @@ long i915_request_wait(struct i915_request *rq,
>   		       long timeout)
>   	__attribute__((nonnull(1)));
>   #define I915_WAIT_INTERRUPTIBLE	BIT(0)
> -#define I915_WAIT_LOCKED	BIT(1) /* struct_mutex held, handle GPU reset */
> -#define I915_WAIT_PRIORITY	BIT(2) /* small priority bump for the request */
> -#define I915_WAIT_ALL		BIT(3) /* used by i915_gem_object_wait() */
> -#define I915_WAIT_FOR_IDLE_BOOST BIT(4)
> +#define I915_WAIT_PRIORITY	BIT(1) /* small priority bump for the request */
> +#define I915_WAIT_ALL		BIT(2) /* used by i915_gem_object_wait() */
> +#define I915_WAIT_FOR_IDLE_BOOST BIT(3)
>   
>   static inline bool i915_request_signaled(const struct i915_request *rq)
>   {
> diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
> index af5827aac7b2..ff1337e34522 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_active.c
> +++ b/drivers/gpu/drm/i915/selftests/i915_active.c
> @@ -164,10 +164,8 @@ static int live_active_wait(void *arg)
>   
>   	__live_put(active);
>   
> -	mutex_lock(&i915->drm.struct_mutex);
> -	if (igt_flush_test(i915, I915_WAIT_LOCKED))
> +	if (igt_flush_test(i915))
>   		err = -EIO;
> -	mutex_unlock(&i915->drm.struct_mutex);
>   
>   	return err;
>   }
> @@ -185,10 +183,8 @@ static int live_active_retire(void *arg)
>   		return PTR_ERR(active);
>   
>   	/* waits for & retires all requests */
> -	mutex_lock(&i915->drm.struct_mutex);
> -	if (igt_flush_test(i915, I915_WAIT_LOCKED))
> +	if (igt_flush_test(i915))
>   		err = -EIO;
> -	mutex_unlock(&i915->drm.struct_mutex);
>   
>   	if (!READ_ONCE(active->retired)) {
>   		pr_err("i915_active not retired after flushing!\n");
> diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
> index ba6064147173..42139db0d69c 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
> +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
> @@ -521,7 +521,7 @@ static int igt_evict_contexts(void *arg)
>   
>   	mutex_lock(&i915->ggtt.vm.mutex);
>   out_locked:
> -	if (igt_flush_test(i915, I915_WAIT_LOCKED))
> +	if (igt_flush_test(i915))
>   		err = -EIO;
>   	while (reserved) {
>   		struct reserved *next = reserved->next;
> diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
> index 4235fa401956..729a53eb6244 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
> @@ -1709,12 +1709,8 @@ int i915_gem_gtt_mock_selftests(void)
>   
>   	err = i915_subtests(tests, ggtt);
>   
> -	mutex_lock(&i915->drm.struct_mutex);
>   	mock_device_flush(i915);
> -	mutex_unlock(&i915->drm.struct_mutex);
> -
>   	i915_gem_drain_freed_objects(i915);
> -
>   	mock_fini_ggtt(ggtt);
>   	kfree(ggtt);
>   out_put:
> diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
> index b3688543ed7d..d046395845da 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_request.c
> +++ b/drivers/gpu/drm/i915/selftests/i915_request.c
> @@ -41,21 +41,16 @@ static int igt_add_request(void *arg)
>   {
>   	struct drm_i915_private *i915 = arg;
>   	struct i915_request *request;
> -	int err = -ENOMEM;
>   
>   	/* Basic preliminary test to create a request and let it loose! */
>   
> -	mutex_lock(&i915->drm.struct_mutex);
>   	request = mock_request(i915->engine[RCS0]->kernel_context, HZ / 10);
>   	if (!request)
> -		goto out_unlock;
> +		return -ENOMEM;
>   
>   	i915_request_add(request);
>   
> -	err = 0;
> -out_unlock:
> -	mutex_unlock(&i915->drm.struct_mutex);
> -	return err;
> +	return 0;
>   }
>   
>   static int igt_wait_request(void *arg)
> @@ -67,12 +62,10 @@ static int igt_wait_request(void *arg)
>   
>   	/* Submit a request, then wait upon it */
>   
> -	mutex_lock(&i915->drm.struct_mutex);
>   	request = mock_request(i915->engine[RCS0]->kernel_context, T);
> -	if (!request) {
> -		err = -ENOMEM;
> -		goto out_unlock;
> -	}
> +	if (!request)
> +		return -ENOMEM;
> +
>   	i915_request_get(request);
>   
>   	if (i915_request_wait(request, 0, 0) != -ETIME) {
> @@ -125,9 +118,7 @@ static int igt_wait_request(void *arg)
>   	err = 0;
>   out_request:
>   	i915_request_put(request);
> -out_unlock:
>   	mock_device_flush(i915);
> -	mutex_unlock(&i915->drm.struct_mutex);
>   	return err;
>   }
>   
> @@ -140,52 +131,45 @@ static int igt_fence_wait(void *arg)
>   
>   	/* Submit a request, treat it as a fence and wait upon it */
>   
> -	mutex_lock(&i915->drm.struct_mutex);
>   	request = mock_request(i915->engine[RCS0]->kernel_context, T);
> -	if (!request) {
> -		err = -ENOMEM;
> -		goto out_locked;
> -	}
> +	if (!request)
> +		return -ENOMEM;
>   
>   	if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
>   		pr_err("fence wait success before submit (expected timeout)!\n");
> -		goto out_locked;
> +		goto out;
>   	}
>   
>   	i915_request_add(request);
> -	mutex_unlock(&i915->drm.struct_mutex);
>   
>   	if (dma_fence_is_signaled(&request->fence)) {
>   		pr_err("fence signaled immediately!\n");
> -		goto out_device;
> +		goto out;
>   	}
>   
>   	if (dma_fence_wait_timeout(&request->fence, false, T / 2) != -ETIME) {
>   		pr_err("fence wait success after submit (expected timeout)!\n");
> -		goto out_device;
> +		goto out;
>   	}
>   
>   	if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
>   		pr_err("fence wait timed out (expected success)!\n");
> -		goto out_device;
> +		goto out;
>   	}
>   
>   	if (!dma_fence_is_signaled(&request->fence)) {
>   		pr_err("fence unsignaled after waiting!\n");
> -		goto out_device;
> +		goto out;
>   	}
>   
>   	if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
>   		pr_err("fence wait timed out when complete (expected success)!\n");
> -		goto out_device;
> +		goto out;
>   	}
>   
>   	err = 0;
> -out_device:
> -	mutex_lock(&i915->drm.struct_mutex);
> -out_locked:
> +out:
>   	mock_device_flush(i915);
> -	mutex_unlock(&i915->drm.struct_mutex);
>   	return err;
>   }
>   
> @@ -199,6 +183,8 @@ static int igt_request_rewind(void *arg)
>   
>   	mutex_lock(&i915->drm.struct_mutex);
>   	ctx[0] = mock_context(i915, "A");
> +	mutex_unlock(&i915->drm.struct_mutex);
> +
>   	ce = i915_gem_context_get_engine(ctx[0], RCS0);
>   	GEM_BUG_ON(IS_ERR(ce));
>   	request = mock_request(ce, 2 * HZ);
> @@ -211,7 +197,10 @@ static int igt_request_rewind(void *arg)
>   	i915_request_get(request);
>   	i915_request_add(request);
>   
> +	mutex_lock(&i915->drm.struct_mutex);
>   	ctx[1] = mock_context(i915, "B");
> +	mutex_unlock(&i915->drm.struct_mutex);
> +
>   	ce = i915_gem_context_get_engine(ctx[1], RCS0);
>   	GEM_BUG_ON(IS_ERR(ce));
>   	vip = mock_request(ce, 0);
> @@ -233,7 +222,6 @@ static int igt_request_rewind(void *arg)
>   	request->engine->submit_request(request);
>   	rcu_read_unlock();
>   
> -	mutex_unlock(&i915->drm.struct_mutex);
>   
>   	if (i915_request_wait(vip, 0, HZ) == -ETIME) {
>   		pr_err("timed out waiting for high priority request\n");
> @@ -248,14 +236,12 @@ static int igt_request_rewind(void *arg)
>   	err = 0;
>   err:
>   	i915_request_put(vip);
> -	mutex_lock(&i915->drm.struct_mutex);
>   err_context_1:
>   	mock_context_close(ctx[1]);
>   	i915_request_put(request);
>   err_context_0:
>   	mock_context_close(ctx[0]);
>   	mock_device_flush(i915);
> -	mutex_unlock(&i915->drm.struct_mutex);
>   	return err;
>   }
>   
> @@ -282,7 +268,6 @@ __live_request_alloc(struct intel_context *ce)
>   static int __igt_breadcrumbs_smoketest(void *arg)
>   {
>   	struct smoketest *t = arg;
> -	struct mutex * const BKL = &t->engine->i915->drm.struct_mutex;
>   	const unsigned int max_batch = min(t->ncontexts, t->max_batch) - 1;
>   	const unsigned int total = 4 * t->ncontexts + 1;
>   	unsigned int num_waits = 0, num_fences = 0;
> @@ -337,14 +322,11 @@ static int __igt_breadcrumbs_smoketest(void *arg)
>   			struct i915_request *rq;
>   			struct intel_context *ce;
>   
> -			mutex_lock(BKL);
> -
>   			ce = i915_gem_context_get_engine(ctx, t->engine->legacy_idx);
>   			GEM_BUG_ON(IS_ERR(ce));
>   			rq = t->request_alloc(ce);
>   			intel_context_put(ce);
>   			if (IS_ERR(rq)) {
> -				mutex_unlock(BKL);
>   				err = PTR_ERR(rq);
>   				count = n;
>   				break;
> @@ -357,8 +339,6 @@ static int __igt_breadcrumbs_smoketest(void *arg)
>   			requests[n] = i915_request_get(rq);
>   			i915_request_add(rq);
>   
> -			mutex_unlock(BKL);
> -
>   			if (err >= 0)
>   				err = i915_sw_fence_await_dma_fence(wait,
>   								    &rq->fence,
> @@ -457,15 +437,15 @@ static int mock_breadcrumbs_smoketest(void *arg)
>   		goto out_threads;
>   	}
>   
> -	mutex_lock(&t.engine->i915->drm.struct_mutex);
>   	for (n = 0; n < t.ncontexts; n++) {
> +		mutex_lock(&t.engine->i915->drm.struct_mutex);
>   		t.contexts[n] = mock_context(t.engine->i915, "mock");
> +		mutex_unlock(&t.engine->i915->drm.struct_mutex);
>   		if (!t.contexts[n]) {
>   			ret = -ENOMEM;
>   			goto out_contexts;
>   		}
>   	}
> -	mutex_unlock(&t.engine->i915->drm.struct_mutex);
>   
>   	for (n = 0; n < ncpus; n++) {
>   		threads[n] = kthread_run(__igt_breadcrumbs_smoketest,
> @@ -495,18 +475,15 @@ static int mock_breadcrumbs_smoketest(void *arg)
>   		atomic_long_read(&t.num_fences),
>   		ncpus);
>   
> -	mutex_lock(&t.engine->i915->drm.struct_mutex);
>   out_contexts:
>   	for (n = 0; n < t.ncontexts; n++) {
>   		if (!t.contexts[n])
>   			break;
>   		mock_context_close(t.contexts[n]);
>   	}
> -	mutex_unlock(&t.engine->i915->drm.struct_mutex);
>   	kfree(t.contexts);
>   out_threads:
>   	kfree(threads);
> -
>   	return ret;
>   }
>   
> @@ -539,7 +516,6 @@ static int live_nop_request(void *arg)
>   {
>   	struct drm_i915_private *i915 = arg;
>   	struct intel_engine_cs *engine;
> -	intel_wakeref_t wakeref;
>   	struct igt_live_test t;
>   	unsigned int id;
>   	int err = -ENODEV;
> @@ -549,9 +525,6 @@ static int live_nop_request(void *arg)
>   	 * the overhead of submitting requests to the hardware.
>   	 */
>   
> -	mutex_lock(&i915->drm.struct_mutex);
> -	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
> -
>   	for_each_engine(engine, i915, id) {
>   		struct i915_request *request = NULL;
>   		unsigned long n, prime;
> @@ -560,17 +533,15 @@ static int live_nop_request(void *arg)
>   
>   		err = igt_live_test_begin(&t, i915, __func__, engine->name);
>   		if (err)
> -			goto out_unlock;
> +			return err;
>   
>   		for_each_prime_number_from(prime, 1, 8192) {
>   			times[1] = ktime_get_raw();
>   
>   			for (n = 0; n < prime; n++) {
>   				request = i915_request_create(engine->kernel_context);
> -				if (IS_ERR(request)) {
> -					err = PTR_ERR(request);
> -					goto out_unlock;
> -				}
> +				if (IS_ERR(request))
> +					return PTR_ERR(request);
>   
>   				/* This space is left intentionally blank.
>   				 *
> @@ -599,7 +570,7 @@ static int live_nop_request(void *arg)
>   
>   		err = igt_live_test_end(&t);
>   		if (err)
> -			goto out_unlock;
> +			return err;
>   
>   		pr_info("Request latencies on %s: 1 = %lluns, %lu = %lluns\n",
>   			engine->name,
> @@ -607,9 +578,6 @@ static int live_nop_request(void *arg)
>   			prime, div64_u64(ktime_to_ns(times[1]), prime));
>   	}
>   
> -out_unlock:
> -	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
> -	mutex_unlock(&i915->drm.struct_mutex);
>   	return err;
>   }
>   
> @@ -681,7 +649,6 @@ static int live_empty_request(void *arg)
>   {
>   	struct drm_i915_private *i915 = arg;
>   	struct intel_engine_cs *engine;
> -	intel_wakeref_t wakeref;
>   	struct igt_live_test t;
>   	struct i915_vma *batch;
>   	unsigned int id;
> @@ -692,14 +659,9 @@ static int live_empty_request(void *arg)
>   	 * the overhead of submitting requests to the hardware.
>   	 */
>   
> -	mutex_lock(&i915->drm.struct_mutex);
> -	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
> -
>   	batch = empty_batch(i915);
> -	if (IS_ERR(batch)) {
> -		err = PTR_ERR(batch);
> -		goto out_unlock;
> -	}
> +	if (IS_ERR(batch))
> +		return PTR_ERR(batch);
>   
>   	for_each_engine(engine, i915, id) {
>   		IGT_TIMEOUT(end_time);
> @@ -752,9 +714,6 @@ static int live_empty_request(void *arg)
>   out_batch:
>   	i915_vma_unpin(batch);
>   	i915_vma_put(batch);
> -out_unlock:
> -	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
> -	mutex_unlock(&i915->drm.struct_mutex);
>   	return err;
>   }
>   
> @@ -834,7 +793,6 @@ static int live_all_engines(void *arg)
>   	struct drm_i915_private *i915 = arg;
>   	struct intel_engine_cs *engine;
>   	struct i915_request *request[I915_NUM_ENGINES];
> -	intel_wakeref_t wakeref;
>   	struct igt_live_test t;
>   	struct i915_vma *batch;
>   	unsigned int id;
> @@ -845,18 +803,15 @@ static int live_all_engines(void *arg)
>   	 * block doing so, and that they don't complete too soon.
>   	 */
>   
> -	mutex_lock(&i915->drm.struct_mutex);
> -	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
> -
>   	err = igt_live_test_begin(&t, i915, __func__, "");
>   	if (err)
> -		goto out_unlock;
> +		return err;
>   
>   	batch = recursive_batch(i915);
>   	if (IS_ERR(batch)) {
>   		err = PTR_ERR(batch);
>   		pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
> -		goto out_unlock;
> +		return err;
>   	}
>   
>   	for_each_engine(engine, i915, id) {
> @@ -926,9 +881,6 @@ static int live_all_engines(void *arg)
>   			i915_request_put(request[id]);
>   	i915_vma_unpin(batch);
>   	i915_vma_put(batch);
> -out_unlock:
> -	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
> -	mutex_unlock(&i915->drm.struct_mutex);
>   	return err;
>   }
>   
> @@ -938,7 +890,6 @@ static int live_sequential_engines(void *arg)
>   	struct i915_request *request[I915_NUM_ENGINES] = {};
>   	struct i915_request *prev = NULL;
>   	struct intel_engine_cs *engine;
> -	intel_wakeref_t wakeref;
>   	struct igt_live_test t;
>   	unsigned int id;
>   	int err;
> @@ -949,12 +900,9 @@ static int live_sequential_engines(void *arg)
>   	 * they are running on independent engines.
>   	 */
>   
> -	mutex_lock(&i915->drm.struct_mutex);
> -	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
> -
>   	err = igt_live_test_begin(&t, i915, __func__, "");
>   	if (err)
> -		goto out_unlock;
> +		return err;
>   
>   	for_each_engine(engine, i915, id) {
>   		struct i915_vma *batch;
> @@ -964,7 +912,7 @@ static int live_sequential_engines(void *arg)
>   			err = PTR_ERR(batch);
>   			pr_err("%s: Unable to create batch for %s, err=%d\n",
>   			       __func__, engine->name, err);
> -			goto out_unlock;
> +			return err;
>   		}
>   
>   		request[id] = i915_request_create(engine->kernel_context);
> @@ -1056,9 +1004,6 @@ static int live_sequential_engines(void *arg)
>   		i915_vma_put(request[id]->batch);
>   		i915_request_put(request[id]);
>   	}
> -out_unlock:
> -	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
> -	mutex_unlock(&i915->drm.struct_mutex);
>   	return err;
>   }
>   
> @@ -1149,9 +1094,10 @@ static int live_breadcrumbs_smoketest(void *arg)
>   		goto out_threads;
>   	}
>   
> -	mutex_lock(&i915->drm.struct_mutex);
>   	for (n = 0; n < t[0].ncontexts; n++) {
> +		mutex_lock(&i915->drm.struct_mutex);
>   		t[0].contexts[n] = live_context(i915, file);
> +		mutex_unlock(&i915->drm.struct_mutex);
>   		if (!t[0].contexts[n]) {
>   			ret = -ENOMEM;
>   			goto out_contexts;
> @@ -1168,7 +1114,6 @@ static int live_breadcrumbs_smoketest(void *arg)
>   		t[id].max_batch = max_batches(t[0].contexts[0], engine);
>   		if (t[id].max_batch < 0) {
>   			ret = t[id].max_batch;
> -			mutex_unlock(&i915->drm.struct_mutex);
>   			goto out_flush;
>   		}
>   		/* One ring interleaved between requests from all cpus */
> @@ -1183,7 +1128,6 @@ static int live_breadcrumbs_smoketest(void *arg)
>   					  &t[id], "igt/%d.%d", id, n);
>   			if (IS_ERR(tsk)) {
>   				ret = PTR_ERR(tsk);
> -				mutex_unlock(&i915->drm.struct_mutex);
>   				goto out_flush;
>   			}
>   
> @@ -1191,7 +1135,6 @@ static int live_breadcrumbs_smoketest(void *arg)
>   			threads[id * ncpus + n] = tsk;
>   		}
>   	}
> -	mutex_unlock(&i915->drm.struct_mutex);
>   
>   	msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
>   
> @@ -1219,10 +1162,8 @@ static int live_breadcrumbs_smoketest(void *arg)
>   	pr_info("Completed %lu waits for %lu fences across %d engines and %d cpus\n",
>   		num_waits, num_fences, RUNTIME_INFO(i915)->num_engines, ncpus);
>   
> -	mutex_lock(&i915->drm.struct_mutex);
>   	ret = igt_live_test_end(&live) ?: ret;
>   out_contexts:
> -	mutex_unlock(&i915->drm.struct_mutex);
>   	kfree(t[0].contexts);
>   out_threads:
>   	kfree(threads);
> diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
> index 438ea0eaa416..825a8286cbe8 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
> +++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
> @@ -263,10 +263,8 @@ int __i915_live_teardown(int err, void *data)
>   {
>   	struct drm_i915_private *i915 = data;
>   
> -	mutex_lock(&i915->drm.struct_mutex);
> -	if (igt_flush_test(i915, I915_WAIT_LOCKED))
> +	if (igt_flush_test(i915))
>   		err = -EIO;
> -	mutex_unlock(&i915->drm.struct_mutex);
>   
>   	i915_gem_drain_freed_objects(i915);
>   
> @@ -284,10 +282,8 @@ int __intel_gt_live_teardown(int err, void *data)
>   {
>   	struct intel_gt *gt = data;
>   
> -	mutex_lock(&gt->i915->drm.struct_mutex);
> -	if (igt_flush_test(gt->i915, I915_WAIT_LOCKED))
> +	if (igt_flush_test(gt->i915))
>   		err = -EIO;
> -	mutex_unlock(&gt->i915->drm.struct_mutex);
>   
>   	i915_gem_drain_freed_objects(gt->i915);
>   
> diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
> index e0ca12c17a7f..6c1705058b93 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_vma.c
> +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
> @@ -835,12 +835,8 @@ int i915_vma_mock_selftests(void)
>   
>   	err = i915_subtests(tests, ggtt);
>   
> -	mutex_lock(&i915->drm.struct_mutex);
>   	mock_device_flush(i915);
> -	mutex_unlock(&i915->drm.struct_mutex);
> -
>   	i915_gem_drain_freed_objects(i915);
> -
>   	mock_fini_ggtt(ggtt);
>   	kfree(ggtt);
>   out_put:
> diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
> index d3b5eb402d33..2a5fbe46ea9f 100644
> --- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
> +++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
> @@ -12,31 +12,25 @@
>   
>   #include "igt_flush_test.h"
>   
> -int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
> +int igt_flush_test(struct drm_i915_private *i915)
>   {
>   	int ret = intel_gt_is_wedged(&i915->gt) ? -EIO : 0;
> -	int repeat = !!(flags & I915_WAIT_LOCKED);
>   
>   	cond_resched();
>   
> -	do {
> -		if (i915_gem_wait_for_idle(i915, flags, HZ / 5) == -ETIME) {
> -			pr_err("%pS timed out, cancelling all further testing.\n",
> -			       __builtin_return_address(0));
> +	i915_retire_requests(i915);

Do you need this one, or would it be better without it, for clarity, given 
that there is one at the end? i915_gem_wait_for_idle will retire all it can.

> +	if (i915_gem_wait_for_idle(i915, 0, HZ / 5) == -ETIME) {
> +		pr_err("%pS timed out, cancelling all further testing.\n",
> +		       __builtin_return_address(0));
>   
> -			GEM_TRACE("%pS timed out.\n",
> -				  __builtin_return_address(0));
> -			GEM_TRACE_DUMP();
> +		GEM_TRACE("%pS timed out.\n",
> +			  __builtin_return_address(0));
> +		GEM_TRACE_DUMP();
>   
> -			intel_gt_set_wedged(&i915->gt);
> -			repeat = 0;
> -			ret = -EIO;
> -		}
> -
> -		/* Ensure we also flush after wedging. */
> -		if (flags & I915_WAIT_LOCKED)
> -			i915_retire_requests(i915);
> -	} while (repeat--);
> +		intel_gt_set_wedged(&i915->gt);
> +		ret = -EIO;
> +	}
> +	i915_retire_requests(i915);
>   
>   	return ret;
>   }
> diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.h b/drivers/gpu/drm/i915/selftests/igt_flush_test.h
> index 63e009927c43..7541fa74e641 100644
> --- a/drivers/gpu/drm/i915/selftests/igt_flush_test.h
> +++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.h
> @@ -9,6 +9,6 @@
>   
>   struct drm_i915_private;
>   
> -int igt_flush_test(struct drm_i915_private *i915, unsigned int flags);
> +int igt_flush_test(struct drm_i915_private *i915);
>   
>   #endif /* IGT_FLUSH_TEST_H */
> diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.c b/drivers/gpu/drm/i915/selftests/igt_live_test.c
> index 3e902761cd16..04a6f88fdf64 100644
> --- a/drivers/gpu/drm/i915/selftests/igt_live_test.c
> +++ b/drivers/gpu/drm/i915/selftests/igt_live_test.c
> @@ -19,15 +19,12 @@ int igt_live_test_begin(struct igt_live_test *t,
>   	enum intel_engine_id id;
>   	int err;
>   
> -	lockdep_assert_held(&i915->drm.struct_mutex);
> -
>   	t->i915 = i915;
>   	t->func = func;
>   	t->name = name;
>   
>   	err = i915_gem_wait_for_idle(i915,
> -				     I915_WAIT_INTERRUPTIBLE |
> -				     I915_WAIT_LOCKED,
> +				     I915_WAIT_INTERRUPTIBLE,
>   				     MAX_SCHEDULE_TIMEOUT);
>   	if (err) {
>   		pr_err("%s(%s): failed to idle before, with err=%d!",
> @@ -50,9 +47,7 @@ int igt_live_test_end(struct igt_live_test *t)
>   	struct intel_engine_cs *engine;
>   	enum intel_engine_id id;
>   
> -	lockdep_assert_held(&i915->drm.struct_mutex);
> -
> -	if (igt_flush_test(i915, I915_WAIT_LOCKED))
> +	if (igt_flush_test(i915))
>   		return -EIO;
>   
>   	if (t->reset_global != i915_reset_count(&i915->gpu_error)) {
> diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
> index 01a89c071bf5..1956006a0d5b 100644
> --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
> +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
> @@ -41,8 +41,6 @@ void mock_device_flush(struct drm_i915_private *i915)
>   	struct intel_engine_cs *engine;
>   	enum intel_engine_id id;
>   
> -	lockdep_assert_held(&i915->drm.struct_mutex);
> -
>   	do {
>   		for_each_engine(engine, i915, id)
>   			mock_engine_flush(engine);
> @@ -55,9 +53,7 @@ static void mock_device_release(struct drm_device *dev)
>   	struct intel_engine_cs *engine;
>   	enum intel_engine_id id;
>   
> -	mutex_lock(&i915->drm.struct_mutex);
>   	mock_device_flush(i915);
> -	mutex_unlock(&i915->drm.struct_mutex);
>   
>   	flush_work(&i915->gem.idle_work);
>   	i915_gem_drain_workqueue(i915);
> 

Essentially this looks fine. My review is provisional, meaning keep it if 
you make some small tweaks:

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>

Regards,

Tvrtko


More information about the Intel-gfx mailing list