[Intel-gfx] [PATCH 15/21] drm/i915: Coordinate i915_active with its own mutex

Tvrtko Ursulin tvrtko.ursulin at linux.intel.com
Fri Sep 20 16:14:43 UTC 2019


On 02/09/2019 05:02, Chris Wilson wrote:
> Forgo the struct_mutex serialisation for i915_active, and interpose its
> own mutex handling for active/retire.
> 
> This is a multi-layered sleight-of-hand. First, we had to ensure that no
> active/retire callbacks accidentally inverted the mutex ordering rules,
> nor assumed that they were themselves serialised by struct_mutex. More
> challenging though, is the rule over updating elements of the active
> rbtree. Instead of the whole i915_active now being serialised by
> struct_mutex, allocations/rotations of the tree are serialised by the
> i915_active.mutex and individual nodes are serialised by the caller
> using the i915_timeline.mutex (we need to use nested spinlocks to
> interact with the dma_fence callback lists).
> 
> The pain point here is that instead of a single mutex around execbuf, we
> now have to take a mutex for the active tracker (one for each vma, context,
> etc) and a couple of spinlocks for each fence update. The improvement in
> fine grained locking allowing for multiple concurrent clients
> (eventually!) should be worth it in typical loads.
> 
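To double-check that I follow the new layering: a caller that already owns 
the request's timeline mutex then does roughly the below (my own sketch, 
not from the patch):

	/* Per-node updates are serialised by the caller's timeline lock. */
	lockdep_assert_held(&rq->timeline->mutex);

	/*
	 * Allocation/rotation of the rbtree is serialised internally by
	 * i915_active.mutex, and moving the callback between dma_fence
	 * cb_lists uses the (nested) fence spinlocks.
	 */
	err = i915_active_ref(&vma->active, rq->timeline, &rq->fence);
	if (err)
		return err;

If that matches the intent it might be worth spelling out near 
i915_active_ref().
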
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> ---
>   .../gpu/drm/i915/display/intel_frontbuffer.c  |   2 +-
>   drivers/gpu/drm/i915/display/intel_overlay.c  |   5 +-
>   .../gpu/drm/i915/gem/i915_gem_client_blt.c    |   2 +-
>   drivers/gpu/drm/i915/gem/i915_gem_context.c   |   8 +-
>   .../gpu/drm/i915/gem/i915_gem_object_types.h  |   1 +
>   drivers/gpu/drm/i915/gem/i915_gem_pm.c        |   9 +-
>   drivers/gpu/drm/i915/gt/intel_context.c       |   6 +-
>   drivers/gpu/drm/i915/gt/intel_engine_pool.c   |   2 +-
>   drivers/gpu/drm/i915/gt/intel_engine_pool.h   |   2 +-
>   drivers/gpu/drm/i915/gt/intel_reset.c         |  10 +-
>   drivers/gpu/drm/i915/gt/intel_timeline.c      |   9 +-
>   .../gpu/drm/i915/gt/intel_timeline_types.h    |   2 +-
>   drivers/gpu/drm/i915/gt/selftest_context.c    |  16 +-
>   drivers/gpu/drm/i915/gt/selftest_lrc.c        |  10 +-
>   .../gpu/drm/i915/gt/selftests/mock_timeline.c |   2 +-
>   drivers/gpu/drm/i915/gvt/scheduler.c          |   3 -
>   drivers/gpu/drm/i915/i915_active.c            | 291 +++++++---------
>   drivers/gpu/drm/i915/i915_active.h            | 318 ++++--------------
>   drivers/gpu/drm/i915/i915_active_types.h      |  23 +-
>   drivers/gpu/drm/i915/i915_gem.c               |  42 ++-
>   drivers/gpu/drm/i915/i915_gem_gtt.c           |   3 +-
>   drivers/gpu/drm/i915/i915_gpu_error.c         |   4 +-
>   drivers/gpu/drm/i915/i915_request.c           |  39 +--
>   drivers/gpu/drm/i915/i915_request.h           |   1 -
>   drivers/gpu/drm/i915/i915_vma.c               |   8 +-
>   drivers/gpu/drm/i915/selftests/i915_active.c  |  36 +-
>   26 files changed, 274 insertions(+), 580 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
> index 6428b8dd70d3..84b164f31895 100644
> --- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
> +++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
> @@ -257,7 +257,7 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj)
>   	front->obj = obj;
>   	kref_init(&front->ref);
>   	atomic_set(&front->bits, 0);
> -	i915_active_init(i915, &front->write,
> +	i915_active_init(&front->write,
>   			 frontbuffer_active,
>   			 i915_active_may_sleep(frontbuffer_retire));
>   
> diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
> index 4f36557b3f3b..544e953342ea 100644
> --- a/drivers/gpu/drm/i915/display/intel_overlay.c
> +++ b/drivers/gpu/drm/i915/display/intel_overlay.c
> @@ -230,7 +230,7 @@ alloc_request(struct intel_overlay *overlay, void (*fn)(struct intel_overlay *))
>   	if (IS_ERR(rq))
>   		return rq;
>   
> -	err = i915_active_ref(&overlay->last_flip, rq->timeline, rq);
> +	err = i915_active_ref(&overlay->last_flip, rq->timeline, &rq->fence);
>   	if (err) {
>   		i915_request_add(rq);
>   		return ERR_PTR(err);
> @@ -1360,8 +1360,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
>   	overlay->contrast = 75;
>   	overlay->saturation = 146;
>   
> -	i915_active_init(dev_priv,
> -			 &overlay->last_flip,
> +	i915_active_init(&overlay->last_flip,
>   			 NULL, intel_overlay_last_flip_retire);
>   
>   	ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
> index 9e72b42a86f5..ace50bb9ee1f 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
> @@ -160,7 +160,7 @@ static int move_to_active(struct i915_vma *vma, struct i915_request *rq)
>   	if (err)
>   		return err;
>   
> -	return i915_active_ref(&vma->active, rq->timeline, rq);
> +	return i915_active_ref(&vma->active, rq->timeline, &rq->fence);
>   }
>   
>   static void clear_pages_worker(struct work_struct *work)
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
> index 4f303cb94698..b8ddcf7899a1 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
> @@ -868,20 +868,18 @@ static int context_barrier_task(struct i915_gem_context *ctx,
>   				void (*task)(void *data),
>   				void *data)
>   {
> -	struct drm_i915_private *i915 = ctx->i915;
>   	struct context_barrier_task *cb;
>   	struct i915_gem_engines_iter it;
>   	struct intel_context *ce;
>   	int err = 0;
>   
> -	lockdep_assert_held(&i915->drm.struct_mutex);
>   	GEM_BUG_ON(!task);
>   
>   	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
>   	if (!cb)
>   		return -ENOMEM;
>   
> -	i915_active_init(i915, &cb->base, NULL, cb_retire);
> +	i915_active_init(&cb->base, NULL, cb_retire);
>   	err = i915_active_acquire(&cb->base);
>   	if (err) {
>   		kfree(cb);
> @@ -913,7 +911,9 @@ static int context_barrier_task(struct i915_gem_context *ctx,
>   		if (emit)
>   			err = emit(rq, data);
>   		if (err == 0)
> -			err = i915_active_ref(&cb->base, rq->timeline, rq);
> +			err = i915_active_ref(&cb->base,
> +					      rq->timeline,
> +					      &rq->fence);
>   
>   		i915_request_add(rq);
>   		if (err)
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
> index b0550727e69a..03e1e3206ab3 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
> @@ -8,6 +8,7 @@
>   #define __I915_GEM_OBJECT_TYPES_H__
>   
>   #include <drm/drm_gem.h>
> +#include <uapi/drm/i915_drm.h>
>   
>   #include "i915_active.h"
>   #include "i915_selftest.h"
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
> index 92e53c25424c..92558fa47108 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
> @@ -16,14 +16,11 @@ static void call_idle_barriers(struct intel_engine_cs *engine)
>   	struct llist_node *node, *next;
>   
>   	llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
> -		struct i915_active_request *active =
> +		struct dma_fence_cb *cb =
>   			container_of((struct list_head *)node,
> -				     typeof(*active), link);
> +				     typeof(*cb), node);
>   
> -		INIT_LIST_HEAD(&active->link);
> -		RCU_INIT_POINTER(active->request, NULL);
> -
> -		active->retire(active, NULL);
> +		cb->func(NULL, cb);
>   	}
>   }
>   
> diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
> index ae7c2689ef30..57e13c6f59c5 100644
> --- a/drivers/gpu/drm/i915/gt/intel_context.c
> +++ b/drivers/gpu/drm/i915/gt/intel_context.c
> @@ -240,7 +240,7 @@ intel_context_init(struct intel_context *ce,
>   
>   	mutex_init(&ce->pin_mutex);
>   
> -	i915_active_init(ctx->i915, &ce->active,
> +	i915_active_init(&ce->active,
>   			 __intel_context_active, __intel_context_retire);
>   }
>   
> @@ -307,7 +307,7 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
>   			return err;
>   
>   		/* Queue this switch after current activity by this context. */
> -		err = i915_active_request_set(&tl->last_request, rq);
> +		err = i915_active_fence_set(&tl->last_request, rq);
>   		mutex_unlock(&tl->mutex);
>   		if (err)
>   			return err;
> @@ -321,7 +321,7 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
>   	 * words transfer the pinned ce object to tracked active request.
>   	 */
>   	GEM_BUG_ON(i915_active_is_idle(&ce->active));
> -	return i915_active_ref(&ce->active, rq->timeline, rq);
> +	return i915_active_ref(&ce->active, rq->timeline, &rq->fence);
>   }
>   
>   struct i915_request *intel_context_create_request(struct intel_context *ce)
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.c b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
> index 81fab101fdb4..3cdbd5f8b5be 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_pool.c
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
> @@ -95,7 +95,7 @@ node_create(struct intel_engine_pool *pool, size_t sz)
>   		return ERR_PTR(-ENOMEM);
>   
>   	node->pool = pool;
> -	i915_active_init(engine->i915, &node->active, pool_active, pool_retire);
> +	i915_active_init(&node->active, pool_active, pool_retire);
>   
>   	obj = i915_gem_object_create_internal(engine->i915, sz);
>   	if (IS_ERR(obj)) {
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.h b/drivers/gpu/drm/i915/gt/intel_engine_pool.h
> index 7e2123b33594..4b0f3ae0c7e2 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_pool.h
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.h
> @@ -18,7 +18,7 @@ static inline int
>   intel_engine_pool_mark_active(struct intel_engine_pool_node *node,
>   			      struct i915_request *rq)
>   {
> -	return i915_active_ref(&node->active, rq->timeline, rq);
> +	return i915_active_ref(&node->active, rq->timeline, &rq->fence);
>   }
>   
>   static inline void
> diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
> index b9d84d52e986..4825c82aefee 100644
> --- a/drivers/gpu/drm/i915/gt/intel_reset.c
> +++ b/drivers/gpu/drm/i915/gt/intel_reset.c
> @@ -814,10 +814,10 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
>   	 */
>   	spin_lock_irqsave(&timelines->lock, flags);
>   	list_for_each_entry(tl, &timelines->active_list, link) {
> -		struct i915_request *rq;
> +		struct dma_fence *fence;
>   
> -		rq = i915_active_request_get_unlocked(&tl->last_request);
> -		if (!rq)
> +		fence = i915_active_fence_get(&tl->last_request);
> +		if (!fence)
>   			continue;
>   
>   		spin_unlock_irqrestore(&timelines->lock, flags);
> @@ -829,8 +829,8 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
>   		 * (I915_FENCE_TIMEOUT) so this wait should not be unbounded
>   		 * in the worst case.
>   		 */
> -		dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT);
> -		i915_request_put(rq);
> +		dma_fence_default_wait(fence, false, MAX_SCHEDULE_TIMEOUT);
> +		dma_fence_put(fence);
>   
>   		/* Restart iteration after droping lock */
>   		spin_lock_irqsave(&timelines->lock, flags);
> diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
> index d824bca43d55..75d896167cfb 100644
> --- a/drivers/gpu/drm/i915/gt/intel_timeline.c
> +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
> @@ -178,8 +178,7 @@ cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
>   	cl->hwsp = hwsp;
>   	cl->vaddr = page_pack_bits(vaddr, cacheline);
>   
> -	i915_active_init(hwsp->gt->i915, &cl->active,
> -			 __cacheline_active, __cacheline_retire);
> +	i915_active_init(&cl->active, __cacheline_active, __cacheline_retire);
>   
>   	return cl;
>   }
> @@ -255,7 +254,7 @@ int intel_timeline_init(struct intel_timeline *timeline,
>   
>   	mutex_init(&timeline->mutex);
>   
> -	INIT_ACTIVE_REQUEST(&timeline->last_request, &timeline->mutex);
> +	INIT_ACTIVE_FENCE(&timeline->last_request, &timeline->mutex);
>   	INIT_LIST_HEAD(&timeline->requests);
>   
>   	i915_syncmap_init(&timeline->sync);
> @@ -443,7 +442,7 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
>   	 * free it after the current request is retired, which ensures that
>   	 * all writes into the cacheline from previous requests are complete.
>   	 */
> -	err = i915_active_ref(&tl->hwsp_cacheline->active, tl, rq);
> +	err = i915_active_ref(&tl->hwsp_cacheline->active, tl, &rq->fence);
>   	if (err)
>   		goto err_cacheline;
>   
> @@ -494,7 +493,7 @@ int intel_timeline_get_seqno(struct intel_timeline *tl,
>   static int cacheline_ref(struct intel_timeline_cacheline *cl,
>   			 struct i915_request *rq)
>   {
> -	return i915_active_ref(&cl->active, rq->timeline, rq);
> +	return i915_active_ref(&cl->active, rq->timeline, &rq->fence);
>   }
>   
>   int intel_timeline_read_hwsp(struct i915_request *from,
> diff --git a/drivers/gpu/drm/i915/gt/intel_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
> index 2b1baf2fcc8e..6d7ac129ce8a 100644
> --- a/drivers/gpu/drm/i915/gt/intel_timeline_types.h
> +++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
> @@ -63,7 +63,7 @@ struct intel_timeline {
>   	 * the request using i915_active_request_get_request_rcu(), or hold the

Looks like a stale comment.

>   	 * struct_mutex.
>   	 */
> -	struct i915_active_request last_request;
> +	struct i915_active_fence last_request;

Worth renaming to last_fence now that request naming is otherwise gone 
from i915_active?

>   
>   	/**
>   	 * We track the most recent seqno that we wait on in every context so
> diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
> index 9d1ea26c7a2d..1420533e8fd5 100644
> --- a/drivers/gpu/drm/i915/gt/selftest_context.c
> +++ b/drivers/gpu/drm/i915/gt/selftest_context.c
> @@ -41,24 +41,20 @@ static int context_sync(struct intel_context *ce)
>   
>   	mutex_lock(&tl->mutex);
>   	do {
> -		struct i915_request *rq;
> +		struct dma_fence *fence;
>   		long timeout;
>   
> -		rcu_read_lock();
> -		rq = rcu_dereference(tl->last_request.request);
> -		if (rq)
> -			rq = i915_request_get_rcu(rq);
> -		rcu_read_unlock();
> -		if (!rq)
> +		fence = i915_active_fence_get(&tl->last_request);
> +		if (!fence)
>   			break;
>   
> -		timeout = i915_request_wait(rq, 0, HZ / 10);
> +		timeout = dma_fence_wait_timeout(fence, false, HZ / 10);
>   		if (timeout < 0)
>   			err = timeout;
>   		else
> -			i915_request_retire_upto(rq);
> +			i915_request_retire_upto(to_request(fence));
>   
> -		i915_request_put(rq);
> +		dma_fence_put(fence);
>   	} while (!err);
>   	mutex_unlock(&tl->mutex);
>   
> diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
> index d791158988d6..aca1b3a9c5de 100644
> --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
> +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
> @@ -984,9 +984,13 @@ static struct i915_request *dummy_request(struct intel_engine_cs *engine)
>   	if (!rq)
>   		return NULL;
>   
> -	INIT_LIST_HEAD(&rq->active_list);
>   	rq->engine = engine;
>   
> +	spin_lock_init(&rq->lock);
> +	INIT_LIST_HEAD(&rq->fence.cb_list);
> +	rq->fence.lock = &rq->lock;
> +	rq->fence.ops = &i915_fence_ops;
> +
>   	i915_sched_node_init(&rq->sched);
>   
>   	/* mark this request as permanently incomplete */
> @@ -1079,8 +1083,8 @@ static int live_suppress_wait_preempt(void *arg)
>   				}
>   
>   				/* Disable NEWCLIENT promotion */
> -				__i915_active_request_set(&rq[i]->timeline->last_request,
> -							  dummy);
> +				__i915_active_fence_set(&rq[i]->timeline->last_request,
> +							&dummy->fence);
>   				i915_request_add(rq[i]);
>   			}
>   
> diff --git a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
> index 598170efcaf6..2a77c051f36a 100644
> --- a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
> +++ b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
> @@ -15,7 +15,7 @@ void mock_timeline_init(struct intel_timeline *timeline, u64 context)
>   
>   	mutex_init(&timeline->mutex);
>   
> -	INIT_ACTIVE_REQUEST(&timeline->last_request, &timeline->mutex);
> +	INIT_ACTIVE_FENCE(&timeline->last_request, &timeline->mutex);
>   	INIT_LIST_HEAD(&timeline->requests);
>   
>   	i915_syncmap_init(&timeline->sync);
> diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
> index 8940fa8d391a..6beb753b1ea1 100644
> --- a/drivers/gpu/drm/i915/gvt/scheduler.c
> +++ b/drivers/gpu/drm/i915/gvt/scheduler.c
> @@ -385,11 +385,8 @@ intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
>   {
>   	struct intel_vgpu *vgpu = workload->vgpu;
>   	struct intel_vgpu_submission *s = &vgpu->submission;
> -	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
>   	struct i915_request *rq;
>   
> -	lockdep_assert_held(&dev_priv->drm.struct_mutex);
> -
>   	if (workload->req)
>   		return 0;
>   
> diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
> index f82d6d931824..2e2ab8176620 100644
> --- a/drivers/gpu/drm/i915/i915_active.c
> +++ b/drivers/gpu/drm/i915/i915_active.c
> @@ -12,8 +12,6 @@
>   #include "i915_active.h"
>   #include "i915_globals.h"
>   
> -#define BKL(ref) (&(ref)->i915->drm.struct_mutex)
> -
>   /*
>    * Active refs memory management
>    *
> @@ -27,35 +25,35 @@ static struct i915_global_active {
>   } global;
>   
>   struct active_node {
> -	struct i915_active_request base;
> +	struct i915_active_fence base;
>   	struct i915_active *ref;
>   	struct rb_node node;
>   	u64 timeline;
>   };
>   
>   static inline struct active_node *
> -node_from_active(struct i915_active_request *active)
> +node_from_active(struct i915_active_fence *active)
>   {
>   	return container_of(active, struct active_node, base);
>   }
>   
>   #define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers)
>   
> -static inline bool is_barrier(const struct i915_active_request *active)
> +static inline bool is_barrier(const struct i915_active_fence *active)
>   {
> -	return IS_ERR(rcu_access_pointer(active->request));
> +	return IS_ERR(rcu_access_pointer(active->fence));
>   }
>   
>   static inline struct llist_node *barrier_to_ll(struct active_node *node)
>   {
>   	GEM_BUG_ON(!is_barrier(&node->base));
> -	return (struct llist_node *)&node->base.link;
> +	return (struct llist_node *)&node->base.cb.node;
>   }
>   
>   static inline struct intel_engine_cs *
>   __barrier_to_engine(struct active_node *node)
>   {
> -	return (struct intel_engine_cs *)READ_ONCE(node->base.link.prev);
> +	return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev);
>   }
>   
>   static inline struct intel_engine_cs *
> @@ -68,7 +66,7 @@ barrier_to_engine(struct active_node *node)
>   static inline struct active_node *barrier_from_ll(struct llist_node *x)
>   {
>   	return container_of((struct list_head *)x,
> -			    struct active_node, base.link);
> +			    struct active_node, base.cb.node);
>   }
>   
>   #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)
> @@ -147,15 +145,18 @@ __active_retire(struct i915_active *ref)
>   	if (!retire)
>   		return;
>   
> -	GEM_BUG_ON(rcu_access_pointer(ref->excl));
> +	GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
>   	rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
> -		GEM_BUG_ON(i915_active_request_isset(&it->base));
> +		GEM_BUG_ON(i915_active_fence_isset(&it->base));
>   		kmem_cache_free(global.slab_cache, it);
>   	}
>   
>   	/* After the final retire, the entire struct may be freed */
>   	if (ref->retire)
>   		ref->retire(ref);
> +
> +	/* ... except if you wait on it, you must manage your own references! */
> +	wake_up_var(ref);
>   }
>   
>   static void
> @@ -189,12 +190,20 @@ active_retire(struct i915_active *ref)
>   }
>   
>   static void
> -node_retire(struct i915_active_request *base, struct i915_request *rq)
> +node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
>   {
> -	active_retire(node_from_active(base)->ref);
> +	i915_active_fence_cb(fence, cb);
> +	active_retire(container_of(cb, struct active_node, base.cb)->ref);
>   }
>   
> -static struct i915_active_request *
> +static void
> +excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
> +{
> +	i915_active_fence_cb(fence, cb);
> +	active_retire(container_of(cb, struct i915_active, excl.cb));
> +}
> +
> +static struct i915_active_fence *
>   active_instance(struct i915_active *ref, struct intel_timeline *tl)
>   {
>   	struct active_node *node, *prealloc;
> @@ -238,7 +247,7 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl)
>   	}
>   
>   	node = prealloc;
> -	i915_active_request_init(&node->base, &tl->mutex, NULL, node_retire);
> +	__i915_active_fence_init(&node->base, &tl->mutex, NULL, node_retire);
>   	node->ref = ref;
>   	node->timeline = idx;
>   
> @@ -253,8 +262,7 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl)
>   	return &node->base;
>   }
>   
> -void __i915_active_init(struct drm_i915_private *i915,
> -			struct i915_active *ref,
> +void __i915_active_init(struct i915_active *ref,
>   			int (*active)(struct i915_active *ref),
>   			void (*retire)(struct i915_active *ref),
>   			struct lock_class_key *key)
> @@ -263,19 +271,18 @@ void __i915_active_init(struct drm_i915_private *i915,
>   
>   	debug_active_init(ref);
>   
> -	ref->i915 = i915;
>   	ref->flags = 0;
>   	ref->active = active;
>   	ref->retire = ptr_unpack_bits(retire, &bits, 2);
>   	if (bits & I915_ACTIVE_MAY_SLEEP)
>   		ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;
>   
> -	ref->excl = NULL;
>   	ref->tree = RB_ROOT;
>   	ref->cache = NULL;
>   	init_llist_head(&ref->preallocated_barriers);
>   	atomic_set(&ref->count, 0);
>   	__mutex_init(&ref->mutex, "i915_active", key);
> +	__i915_active_fence_init(&ref->excl, &ref->mutex, NULL, excl_retire);
>   	INIT_WORK(&ref->work, active_work);
>   }
>   
> @@ -329,9 +336,9 @@ __active_del_barrier(struct i915_active *ref, struct active_node *node)
>   
>   int i915_active_ref(struct i915_active *ref,
>   		    struct intel_timeline *tl,
> -		    struct i915_request *rq)
> +		    struct dma_fence *fence)
>   {
> -	struct i915_active_request *active;
> +	struct i915_active_fence *active;
>   	int err;
>   
>   	lockdep_assert_held(&tl->mutex);
> @@ -354,65 +361,38 @@ int i915_active_ref(struct i915_active *ref,
>   		 * request that we want to emit on the kernel_context.
>   		 */
>   		__active_del_barrier(ref, node_from_active(active));
> -		RCU_INIT_POINTER(active->request, NULL);
> -		INIT_LIST_HEAD(&active->link);
> -	} else {
> -		if (!i915_active_request_isset(active))
> -			atomic_inc(&ref->count);
> +		RCU_INIT_POINTER(active->fence, NULL);
> +		atomic_dec(&ref->count);
>   	}
> -	GEM_BUG_ON(!atomic_read(&ref->count));
> -	__i915_active_request_set(active, rq);
> +	if (!__i915_active_fence_set(active, fence))
> +		atomic_inc(&ref->count);
>   
>   out:
>   	i915_active_release(ref);
>   	return err;
>   }
>   
> -static void excl_cb(struct dma_fence *f, struct dma_fence_cb *cb)
> -{
> -	struct i915_active *ref = container_of(cb, typeof(*ref), excl_cb);
> -
> -	RCU_INIT_POINTER(ref->excl, NULL);
> -	dma_fence_put(f);
> -
> -	active_retire(ref);
> -}
> -
>   void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
>   {
>   	GEM_BUG_ON(i915_active_is_idle(ref));
>   
> -	dma_fence_get(f);
> -
> -	rcu_read_lock();
> -	if (rcu_access_pointer(ref->excl)) {
> -		struct dma_fence *old;
> -
> -		old = dma_fence_get_rcu_safe(&ref->excl);
> -		if (old) {
> -			if (dma_fence_remove_callback(old, &ref->excl_cb))
> -				atomic_dec(&ref->count);
> -			dma_fence_put(old);
> -		}
> -	}
> -	rcu_read_unlock();
> -
> -	atomic_inc(&ref->count);
> -	rcu_assign_pointer(ref->excl, f);
> +	mutex_acquire(&ref->mutex.dep_map, 0, 0, _THIS_IP_);

Is this just a lockdep annotation? It probably deserves a comment.
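
If it is, maybe something along these lines (just a sketch of the comment I 
have in mind, assuming that is indeed the reason):

	/*
	 * ref->excl nominates ref->mutex as its lock, so pretend to hold
	 * it across __i915_active_fence_set() purely to satisfy the
	 * debug-only active_is_held() check; no actual locking happens.
	 */
	mutex_acquire(&ref->mutex.dep_map, 0, 0, _THIS_IP_);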

> +	if (!__i915_active_fence_set(&ref->excl, f))
> +		atomic_inc(&ref->count);

Wouldn't the refcount management be better done from inside 
__i915_active_fence_set()?
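
What I mean is roughly the below (helper name and placement purely 
illustrative):

static void __i915_active_set_fence(struct i915_active *ref,
				    struct i915_active_fence *active,
				    struct dma_fence *fence)
{
	/* Keep ref->count in step with the tracked fence in one place. */
	if (!__i915_active_fence_set(active, fence))
		atomic_inc(&ref->count);
}

Then both i915_active_ref() and i915_active_set_exclusive() would call the 
same helper instead of open-coding the conditional atomic_inc.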

> +	mutex_release(&ref->mutex.dep_map, 0, _THIS_IP_);
> +}
>   
> -	if (dma_fence_add_callback(f, &ref->excl_cb, excl_cb)) {
> -		RCU_INIT_POINTER(ref->excl, NULL);
> -		atomic_dec(&ref->count);
> -		dma_fence_put(f);
> -	}
> +bool i915_active_acquire_if_busy(struct i915_active *ref)
> +{
> +	debug_active_assert(ref);
> +	return atomic_add_unless(&ref->count, 1, 0);
>   }
>   
>   int i915_active_acquire(struct i915_active *ref)
>   {
>   	int err;
>   
> -	debug_active_assert(ref);
> -	if (atomic_add_unless(&ref->count, 1, 0))
> +	if (i915_active_acquire_if_busy(ref))
>   		return 0;
>   
>   	err = mutex_lock_interruptible(&ref->mutex);
> @@ -437,121 +417,49 @@ void i915_active_release(struct i915_active *ref)
>   	active_retire(ref);
>   }
>   
> -static void __active_ungrab(struct i915_active *ref)
> -{
> -	clear_and_wake_up_bit(I915_ACTIVE_GRAB_BIT, &ref->flags);
> -}
> -
> -bool i915_active_trygrab(struct i915_active *ref)
> -{
> -	debug_active_assert(ref);
> -
> -	if (test_and_set_bit(I915_ACTIVE_GRAB_BIT, &ref->flags))
> -		return false;
> -
> -	if (!atomic_add_unless(&ref->count, 1, 0)) {
> -		__active_ungrab(ref);
> -		return false;
> -	}
> -
> -	return true;
> -}
> -
> -void i915_active_ungrab(struct i915_active *ref)
> -{
> -	GEM_BUG_ON(!test_bit(I915_ACTIVE_GRAB_BIT, &ref->flags));
> -
> -	active_retire(ref);
> -	__active_ungrab(ref);
> -}
> -
> -static int excl_wait(struct i915_active *ref)
> -{
> -	struct dma_fence *old;
> -	int err = 0;
> -
> -	if (!rcu_access_pointer(ref->excl))
> -		return 0;
> -
> -	rcu_read_lock();
> -	old = dma_fence_get_rcu_safe(&ref->excl);
> -	rcu_read_unlock();
> -	if (old) {
> -		err = dma_fence_wait(old, true);
> -		dma_fence_put(old);
> -	}
> -
> -	return err;
> -}
> -
>   int i915_active_wait(struct i915_active *ref)
>   {
>   	struct active_node *it, *n;
> -	int err;
> +	int err = 0;
>   
>   	might_sleep();
> -	might_lock(&ref->mutex);
>   
> -	if (i915_active_is_idle(ref))
> +	if (!i915_active_acquire_if_busy(ref))
>   		return 0;
>   
> -	err = mutex_lock_interruptible(&ref->mutex);
> -	if (err)
> -		return err;
> -
> -	if (!atomic_add_unless(&ref->count, 1, 0)) {
> -		mutex_unlock(&ref->mutex);
> -		return 0;
> -	}
> +	/* Flush lazy signals */
> +	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
> +		struct dma_fence *fence;
>   
> -	err = excl_wait(ref);
> -	if (err)
> -		goto out;
> +		if (is_barrier(&it->base)) /* unconnected idle barrier */
> +			continue;
>   
> -	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
> -		if (is_barrier(&it->base)) { /* unconnected idle-barrier */
> -			err = -EBUSY;
> -			break;
> +		fence = i915_active_fence_get(&it->base);
> +		if (fence) {
> +			dma_fence_enable_sw_signaling(fence);
> +			dma_fence_put(fence);
>   		}
> -
> -		err = i915_active_request_retire(&it->base, BKL(ref));
> -		if (err)
> -			break;
>   	}
> -
> -out:
> -	__active_retire(ref);
> +	/* Any fence added after the wait begins will not be auto-signaled */
> +	i915_active_release(ref);
>   	if (err)
>   		return err;
>   
> -	if (wait_on_bit(&ref->flags, I915_ACTIVE_GRAB_BIT, TASK_KILLABLE))
> +	if (wait_var_event_interruptible(ref, i915_active_is_idle(ref)))
>   		return -EINTR;
>   
> -	flush_work(&ref->work);
> -	if (!i915_active_is_idle(ref))
> -		return -EBUSY;
> -
>   	return 0;
>   }
>   
> -int i915_request_await_active_request(struct i915_request *rq,
> -				      struct i915_active_request *active)
> -{
> -	struct i915_request *barrier =
> -		i915_active_request_raw(active, &rq->i915->drm.struct_mutex);
> -
> -	return barrier ? i915_request_await_dma_fence(rq, &barrier->fence) : 0;
> -}
> -
>   int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
>   {
>   	int err = 0;
>   
> -	if (rcu_access_pointer(ref->excl)) {
> +	if (rcu_access_pointer(ref->excl.fence)) {
>   		struct dma_fence *fence;
>   
>   		rcu_read_lock();
> -		fence = dma_fence_get_rcu_safe(&ref->excl);
> +		fence = dma_fence_get_rcu_safe(&ref->excl.fence);
>   		rcu_read_unlock();
>   		if (fence) {
>   			err = i915_request_await_dma_fence(rq, fence);
> @@ -577,7 +485,7 @@ void i915_active_fini(struct i915_active *ref)
>   
>   static inline bool is_idle_barrier(struct active_node *node, u64 idx)
>   {
> -	return node->timeline == idx && !i915_active_request_isset(&node->base);
> +	return node->timeline == idx && !i915_active_fence_isset(&node->base);
>   }
>   
>   static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
> @@ -697,13 +605,13 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
>   			node->base.lock =
>   				&engine->kernel_context->timeline->mutex;
>   #endif
> -			RCU_INIT_POINTER(node->base.request, NULL);
> -			node->base.retire = node_retire;
> +			RCU_INIT_POINTER(node->base.fence, NULL);
> +			node->base.cb.func = node_retire;
>   			node->timeline = idx;
>   			node->ref = ref;
>   		}
>   
> -		if (!i915_active_request_isset(&node->base)) {
> +		if (!i915_active_fence_isset(&node->base)) {
>   			/*
>   			 * Mark this as being *our* unconnected proto-node.
>   			 *
> @@ -713,8 +621,8 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
>   			 * and then we can use the rb_node and list pointers
>   			 * for our tracking of the pending barrier.
>   			 */
> -			RCU_INIT_POINTER(node->base.request, ERR_PTR(-EAGAIN));
> -			node->base.link.prev = (void *)engine;
> +			RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN));
> +			node->base.cb.node.prev = (void *)engine;
>   			atomic_inc(&ref->count);
>   		}
>   
> @@ -781,25 +689,65 @@ void i915_request_add_active_barriers(struct i915_request *rq)
>   {
>   	struct intel_engine_cs *engine = rq->engine;
>   	struct llist_node *node, *next;
> +	unsigned long flags;
>   
>   	GEM_BUG_ON(intel_engine_is_virtual(engine));
>   	GEM_BUG_ON(rq->timeline != engine->kernel_context->timeline);
>   
> +	node = llist_del_all(&engine->barrier_tasks);
> +	if (!node)
> +		return;
>   	/*
>   	 * Attach the list of proto-fences to the in-flight request such
>   	 * that the parent i915_active will be released when this request
>   	 * is retired.
>   	 */
> -	llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
> -		RCU_INIT_POINTER(barrier_from_ll(node)->base.request, rq);
> +	spin_lock_irqsave(&rq->lock, flags);
> +	llist_for_each_safe(node, next, node) {
> +		RCU_INIT_POINTER(barrier_from_ll(node)->base.fence, &rq->fence);
>   		smp_wmb(); /* serialise with reuse_idle_barrier */
> -		list_add_tail((struct list_head *)node, &rq->active_list);
> +		list_add_tail((struct list_head *)node, &rq->fence.cb_list);
> +	}
> +	spin_unlock_irqrestore(&rq->lock, flags);
> +}
> +
> +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
> +#define active_is_held(active) lockdep_is_held((active)->lock)
> +#else
> +#define active_is_held(active) true
> +#endif
> +
> +struct dma_fence *
> +__i915_active_fence_set(struct i915_active_fence *active,
> +			struct dma_fence *fence)
> +{
> +	struct dma_fence *prev;
> +	unsigned long flags;
> +
> +	/* NB: updates must be serialised by an outer timeline mutex */
> +	spin_lock_irqsave(fence->lock, flags);
> +	GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
> +
> +	prev = rcu_dereference_protected(active->fence, active_is_held(active));
> +	if (prev) {
> +		spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
> +		__list_del_entry(&active->cb.node);
> +		spin_unlock(prev->lock); /* serialise with prev->cb_list */
> +		prev = rcu_access_pointer(active->fence);

Why is it important to re-read active->fence here, and does that deserve a 
comment?

How does it tie in with the i915_active->count refcounting, which is done 
based on the return value of this function?
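
To spell out the interleaving I am trying to understand (sketch):

	1. CPU A: prev = active->fence;  /* non-NULL */
	2. CPU B: the old fence signals; its callback runs under prev->lock,
	          does RCU_INIT_POINTER(active->fence, NULL) and drops
	          ref->count.
	3. CPU A: spin_lock(prev->lock);
	          __list_del_entry(&active->cb.node);
	          prev = active->fence; /* re-read now sees NULL */

Is the re-read there so that, when the old callback has already fired, we 
return NULL and the caller knows it needs to take a new count? If so I 
think a comment spelling it out would help.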

>   	}
> +
> +	rcu_assign_pointer(active->fence, fence);
> +	list_add_tail(&active->cb.node, &fence->cb_list);
> +
> +	spin_unlock_irqrestore(fence->lock, flags);
> +
> +	return prev;
>   }
>   
> -int i915_active_request_set(struct i915_active_request *active,
> -			    struct i915_request *rq)
> +int i915_active_fence_set(struct i915_active_fence *active,
> +			  struct i915_request *rq)
>   {
> +	struct dma_fence *fence;
>   	int err;
>   
>   #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
> @@ -807,18 +755,25 @@ int i915_active_request_set(struct i915_active_request *active,
>   #endif
>   
>   	/* Must maintain ordering wrt previous active requests */
> -	err = i915_request_await_active_request(rq, active);
> -	if (err)
> -		return err;
> +	rcu_read_lock();
> +	fence = __i915_active_fence_set(active, &rq->fence);
> +	if (fence)
> +		fence = dma_fence_get_rcu(fence);
> +	rcu_read_unlock();
> +
> +	if (fence) {
> +		err = i915_request_await_dma_fence(rq, fence);
> +		dma_fence_put(fence);
> +		if (err)
> +			return err;
> +	}
>   
> -	__i915_active_request_set(active, rq);
>   	return 0;
>   }
>   
> -void i915_active_retire_noop(struct i915_active_request *active,
> -			     struct i915_request *request)
> +void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
>   {
> -	/* Space left intentionally blank */
> +	i915_active_fence_cb(fence, cb);
>   }
>   
>   #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
> diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
> index af3d536e26fd..e1912d539014 100644
> --- a/drivers/gpu/drm/i915/i915_active.h
> +++ b/drivers/gpu/drm/i915/i915_active.h
> @@ -10,7 +10,10 @@
>   #include <linux/lockdep.h>
>   
>   #include "i915_active_types.h"
> -#include "i915_request.h"
> +
> +struct i915_request;
> +struct intel_engine_cs;
> +struct intel_timeline;
>   
>   /*
>    * We treat requests as fences. This is not be to confused with our
> @@ -28,308 +31,108 @@
>    * write access so that we can perform concurrent read operations between
>    * the CPU and GPU engines, as well as waiting for all rendering to
>    * complete, or waiting for the last GPU user of a "fence register". The
> - * object then embeds a #i915_active_request to track the most recent (in
> + * object then embeds a #i915_active_fence to track the most recent (in
>    * retirement order) request relevant for the desired mode of access.
> - * The #i915_active_request is updated with i915_active_request_set() to
> + * The #i915_active_fence is updated with i915_active_fence_set() to
>    * track the most recent fence request, typically this is done as part of
>    * i915_vma_move_to_active().
>    *
> - * When the #i915_active_request completes (is retired), it will
> + * When the #i915_active_fence completes (is retired), it will
>    * signal its completion to the owner through a callback as well as mark
> - * itself as idle (i915_active_request.request == NULL). The owner
> + * itself as idle (i915_active_fence.request == NULL). The owner
>    * can then perform any action, such as delayed freeing of an active
>    * resource including itself.
>    */
>   
> -void i915_active_retire_noop(struct i915_active_request *active,
> -			     struct i915_request *request);
> +void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb);
>   
>   /**
> - * i915_active_request_init - prepares the activity tracker for use
> + * __i915_active_fence_init - prepares the activity tracker for use
>    * @active - the active tracker
> - * @rq - initial request to track, can be NULL
> + * @fence - initial fence to track, can be NULL
>    * @func - a callback when then the tracker is retired (becomes idle),
>    *         can be NULL
>    *
> - * i915_active_request_init() prepares the embedded @active struct for use as
> - * an activity tracker, that is for tracking the last known active request
> - * associated with it. When the last request becomes idle, when it is retired
> + * i915_active_fence_init() prepares the embedded @active struct for use as
> + * an activity tracker, that is for tracking the last known active fence
> + * associated with it. When the last fence becomes idle, when it is retired
>    * after completion, the optional callback @func is invoked.
>    */
>   static inline void
> -i915_active_request_init(struct i915_active_request *active,
> +__i915_active_fence_init(struct i915_active_fence *active,
>   			 struct mutex *lock,
> -			 struct i915_request *rq,
> -			 i915_active_retire_fn retire)
> +			 void *fence,
> +			 dma_fence_func_t fn)
>   {
> -	RCU_INIT_POINTER(active->request, rq);
> -	INIT_LIST_HEAD(&active->link);
> -	active->retire = retire ?: i915_active_retire_noop;
> +	RCU_INIT_POINTER(active->fence, fence);
> +	active->cb.func = fn ?: i915_active_noop;
>   #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
>   	active->lock = lock;
>   #endif
>   }
>   
> -#define INIT_ACTIVE_REQUEST(name, lock) \
> -	i915_active_request_init((name), (lock), NULL, NULL)
> -
> -/**
> - * i915_active_request_set - updates the tracker to watch the current request
> - * @active - the active tracker
> - * @request - the request to watch
> - *
> - * __i915_active_request_set() watches the given @request for completion. Whilst
> - * that @request is busy, the @active reports busy. When that @request is
> - * retired, the @active tracker is updated to report idle.
> - */
> -static inline void
> -__i915_active_request_set(struct i915_active_request *active,
> -			  struct i915_request *request)
> -{
> -#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
> -	lockdep_assert_held(active->lock);
> -#endif
> -	list_move(&active->link, &request->active_list);
> -	rcu_assign_pointer(active->request, request);
> -}
> +#define INIT_ACTIVE_FENCE(A, LOCK) \
> +	__i915_active_fence_init((A), (LOCK), NULL, NULL)
>   
> -int __must_check
> -i915_active_request_set(struct i915_active_request *active,
> -			struct i915_request *rq);
> +struct dma_fence *
> +__i915_active_fence_set(struct i915_active_fence *active,
> +			struct dma_fence *fence);
>   
>   /**
> - * i915_active_request_raw - return the active request
> + * i915_active_fence_set - updates the tracker to watch the current fence
>    * @active - the active tracker
> + * @rq - the request to watch
>    *
> - * i915_active_request_raw() returns the current request being tracked, or NULL.
> - * It does not obtain a reference on the request for the caller, so the caller
> - * must hold struct_mutex.
> + * i915_active_fence_set() watches the given @rq for completion. While
> + * that @rq is busy, the @active reports busy. When that @rq is signaled
> + * (or else retired) the @active tracker is updated to report idle.
>    */
> -static inline struct i915_request *
> -i915_active_request_raw(const struct i915_active_request *active,
> -			struct mutex *mutex)
> -{
> -	return rcu_dereference_protected(active->request,
> -					 lockdep_is_held(mutex));
> -}
> -
> -/**
> - * i915_active_request_peek - report the active request being monitored
> - * @active - the active tracker
> - *
> - * i915_active_request_peek() returns the current request being tracked if
> - * still active, or NULL. It does not obtain a reference on the request
> - * for the caller, so the caller must hold struct_mutex.
> - */
> -static inline struct i915_request *
> -i915_active_request_peek(const struct i915_active_request *active,
> -			 struct mutex *mutex)
> -{
> -	struct i915_request *request;
> -
> -	request = i915_active_request_raw(active, mutex);
> -	if (!request || i915_request_completed(request))
> -		return NULL;
> -
> -	return request;
> -}
> -
> -/**
> - * i915_active_request_get - return a reference to the active request
> - * @active - the active tracker
> - *
> - * i915_active_request_get() returns a reference to the active request, or NULL
> - * if the active tracker is idle. The caller must hold struct_mutex.
> - */
> -static inline struct i915_request *
> -i915_active_request_get(const struct i915_active_request *active,
> -			struct mutex *mutex)
> -{
> -	return i915_request_get(i915_active_request_peek(active, mutex));
> -}
> -
> -/**
> - * __i915_active_request_get_rcu - return a reference to the active request
> - * @active - the active tracker
> - *
> - * __i915_active_request_get() returns a reference to the active request,
> - * or NULL if the active tracker is idle. The caller must hold the RCU read
> - * lock, but the returned pointer is safe to use outside of RCU.
> - */
> -static inline struct i915_request *
> -__i915_active_request_get_rcu(const struct i915_active_request *active)
> -{
> -	/*
> -	 * Performing a lockless retrieval of the active request is super
> -	 * tricky. SLAB_TYPESAFE_BY_RCU merely guarantees that the backing
> -	 * slab of request objects will not be freed whilst we hold the
> -	 * RCU read lock. It does not guarantee that the request itself
> -	 * will not be freed and then *reused*. Viz,
> -	 *
> -	 * Thread A			Thread B
> -	 *
> -	 * rq = active.request
> -	 *				retire(rq) -> free(rq);
> -	 *				(rq is now first on the slab freelist)
> -	 *				active.request = NULL
> -	 *
> -	 *				rq = new submission on a new object
> -	 * ref(rq)
> -	 *
> -	 * To prevent the request from being reused whilst the caller
> -	 * uses it, we take a reference like normal. Whilst acquiring
> -	 * the reference we check that it is not in a destroyed state
> -	 * (refcnt == 0). That prevents the request being reallocated
> -	 * whilst the caller holds on to it. To check that the request
> -	 * was not reallocated as we acquired the reference we have to
> -	 * check that our request remains the active request across
> -	 * the lookup, in the same manner as a seqlock. The visibility
> -	 * of the pointer versus the reference counting is controlled
> -	 * by using RCU barriers (rcu_dereference and rcu_assign_pointer).
> -	 *
> -	 * In the middle of all that, we inspect whether the request is
> -	 * complete. Retiring is lazy so the request may be completed long
> -	 * before the active tracker is updated. Querying whether the
> -	 * request is complete is far cheaper (as it involves no locked
> -	 * instructions setting cachelines to exclusive) than acquiring
> -	 * the reference, so we do it first. The RCU read lock ensures the
> -	 * pointer dereference is valid, but does not ensure that the
> -	 * seqno nor HWS is the right one! However, if the request was
> -	 * reallocated, that means the active tracker's request was complete.
> -	 * If the new request is also complete, then both are and we can
> -	 * just report the active tracker is idle. If the new request is
> -	 * incomplete, then we acquire a reference on it and check that
> -	 * it remained the active request.
> -	 *
> -	 * It is then imperative that we do not zero the request on
> -	 * reallocation, so that we can chase the dangling pointers!
> -	 * See i915_request_alloc().
> -	 */
> -	do {
> -		struct i915_request *request;
> -
> -		request = rcu_dereference(active->request);
> -		if (!request || i915_request_completed(request))
> -			return NULL;
> -
> -		/*
> -		 * An especially silly compiler could decide to recompute the
> -		 * result of i915_request_completed, more specifically
> -		 * re-emit the load for request->fence.seqno. A race would catch
> -		 * a later seqno value, which could flip the result from true to
> -		 * false. Which means part of the instructions below might not
> -		 * be executed, while later on instructions are executed. Due to
> -		 * barriers within the refcounting the inconsistency can't reach
> -		 * past the call to i915_request_get_rcu, but not executing
> -		 * that while still executing i915_request_put() creates
> -		 * havoc enough.  Prevent this with a compiler barrier.
> -		 */
> -		barrier();
> -
> -		request = i915_request_get_rcu(request);
> -
> -		/*
> -		 * What stops the following rcu_access_pointer() from occurring
> -		 * before the above i915_request_get_rcu()? If we were
> -		 * to read the value before pausing to get the reference to
> -		 * the request, we may not notice a change in the active
> -		 * tracker.
> -		 *
> -		 * The rcu_access_pointer() is a mere compiler barrier, which
> -		 * means both the CPU and compiler are free to perform the
> -		 * memory read without constraint. The compiler only has to
> -		 * ensure that any operations after the rcu_access_pointer()
> -		 * occur afterwards in program order. This means the read may
> -		 * be performed earlier by an out-of-order CPU, or adventurous
> -		 * compiler.
> -		 *
> -		 * The atomic operation at the heart of
> -		 * i915_request_get_rcu(), see dma_fence_get_rcu(), is
> -		 * atomic_inc_not_zero() which is only a full memory barrier
> -		 * when successful. That is, if i915_request_get_rcu()
> -		 * returns the request (and so with the reference counted
> -		 * incremented) then the following read for rcu_access_pointer()
> -		 * must occur after the atomic operation and so confirm
> -		 * that this request is the one currently being tracked.
> -		 *
> -		 * The corresponding write barrier is part of
> -		 * rcu_assign_pointer().
> -		 */
> -		if (!request || request == rcu_access_pointer(active->request))
> -			return rcu_pointer_handoff(request);
> -
> -		i915_request_put(request);
> -	} while (1);
> -}
> -
> +int __must_check
> +i915_active_fence_set(struct i915_active_fence *active,
> +		      struct i915_request *rq);
>   /**
> - * i915_active_request_get_unlocked - return a reference to the active request
> + * i915_active_fence_get - return a reference to the active fence
>    * @active - the active tracker
>    *
> - * i915_active_request_get_unlocked() returns a reference to the active request,
> + * i915_active_fence_get() returns a reference to the active fence,
>    * or NULL if the active tracker is idle. The reference is obtained under RCU,
>    * so no locking is required by the caller.
>    *
> - * The reference should be freed with i915_request_put().
> + * The reference should be freed with dma_fence_put().
>    */
> -static inline struct i915_request *
> -i915_active_request_get_unlocked(const struct i915_active_request *active)
> +static inline struct dma_fence *
> +i915_active_fence_get(struct i915_active_fence *active)
>   {
> -	struct i915_request *request;
> +	struct dma_fence *fence;
>   
>   	rcu_read_lock();
> -	request = __i915_active_request_get_rcu(active);
> +	fence = dma_fence_get_rcu_safe(&active->fence);
>   	rcu_read_unlock();
>   
> -	return request;
> +	return fence;
>   }
>   
>   /**
> - * i915_active_request_isset - report whether the active tracker is assigned
> + * i915_active_fence_isset - report whether the active tracker is assigned
>    * @active - the active tracker
>    *
> - * i915_active_request_isset() returns true if the active tracker is currently
> - * assigned to a request. Due to the lazy retiring, that request may be idle
> + * i915_active_fence_isset() returns true if the active tracker is currently
> + * assigned to a fence. Due to the lazy retiring, that fence may be idle
>    * and this may report stale information.
>    */
>   static inline bool
> -i915_active_request_isset(const struct i915_active_request *active)
> +i915_active_fence_isset(const struct i915_active_fence *active)
>   {
> -	return rcu_access_pointer(active->request);
> +	return rcu_access_pointer(active->fence);
>   }
>   
> -/**
> - * i915_active_request_retire - waits until the request is retired
> - * @active - the active request on which to wait
> - *
> - * i915_active_request_retire() waits until the request is completed,
> - * and then ensures that at least the retirement handler for this
> - * @active tracker is called before returning. If the @active
> - * tracker is idle, the function returns immediately.
> - */
> -static inline int __must_check
> -i915_active_request_retire(struct i915_active_request *active,
> -			   struct mutex *mutex)
> +static inline void
> +i915_active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
>   {
> -	struct i915_request *request;
> -	long ret;
> -
> -	request = i915_active_request_raw(active, mutex);
> -	if (!request)
> -		return 0;
> -
> -	ret = i915_request_wait(request,
> -				I915_WAIT_INTERRUPTIBLE,
> -				MAX_SCHEDULE_TIMEOUT);
> -	if (ret < 0)
> -		return ret;
> +	struct i915_active_fence *active =
> +		container_of(cb, typeof(*active), cb);
>   
> -	list_del_init(&active->link);
> -	RCU_INIT_POINTER(active->request, NULL);
> -
> -	active->retire(active, request);
> -
> -	return 0;
> +	RCU_INIT_POINTER(active->fence, NULL);
>   }
>   
>   /*
> @@ -358,41 +161,34 @@ i915_active_request_retire(struct i915_active_request *active,
>    * synchronisation.
>    */
>   
> -void __i915_active_init(struct drm_i915_private *i915,
> -			struct i915_active *ref,
> +void __i915_active_init(struct i915_active *ref,
>   			int (*active)(struct i915_active *ref),
>   			void (*retire)(struct i915_active *ref),
>   			struct lock_class_key *key);
> -#define i915_active_init(i915, ref, active, retire) do {		\
> +#define i915_active_init(ref, active, retire) do {		\
>   	static struct lock_class_key __key;				\
>   									\
> -	__i915_active_init(i915, ref, active, retire, &__key);		\
> +	__i915_active_init(ref, active, retire, &__key);		\
>   } while (0)
>   
>   int i915_active_ref(struct i915_active *ref,
>   		    struct intel_timeline *tl,
> -		    struct i915_request *rq);
> +		    struct dma_fence *fence);
>   
>   void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f);
>   
>   static inline bool i915_active_has_exclusive(struct i915_active *ref)
>   {
> -	return rcu_access_pointer(ref->excl);
> +	return rcu_access_pointer(ref->excl.fence);
>   }
>   
>   int i915_active_wait(struct i915_active *ref);
>   
> -int i915_request_await_active(struct i915_request *rq,
> -			      struct i915_active *ref);
> -int i915_request_await_active_request(struct i915_request *rq,
> -				      struct i915_active_request *active);
> +int i915_request_await_active(struct i915_request *rq, struct i915_active *ref);
>   
>   int i915_active_acquire(struct i915_active *ref);
> +bool i915_active_acquire_if_busy(struct i915_active *ref);
>   void i915_active_release(struct i915_active *ref);
> -void __i915_active_release_nested(struct i915_active *ref, int subclass);
> -
> -bool i915_active_trygrab(struct i915_active *ref);
> -void i915_active_ungrab(struct i915_active *ref);
>   
>   static inline bool
>   i915_active_is_idle(const struct i915_active *ref)
> diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h
> index 021167f0004d..d89a74c142c6 100644
> --- a/drivers/gpu/drm/i915/i915_active_types.h
> +++ b/drivers/gpu/drm/i915/i915_active_types.h
> @@ -17,17 +17,9 @@
>   
>   #include "i915_utils.h"
>   
> -struct drm_i915_private;
> -struct i915_active_request;
> -struct i915_request;
> -
> -typedef void (*i915_active_retire_fn)(struct i915_active_request *,
> -				      struct i915_request *);
> -
> -struct i915_active_request {
> -	struct i915_request __rcu *request;
> -	struct list_head link;
> -	i915_active_retire_fn retire;
> +struct i915_active_fence {
> +	struct dma_fence __rcu *fence;
> +	struct dma_fence_cb cb;
>   #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
>   	/*
>   	 * Incorporeal!
> @@ -53,20 +45,17 @@ struct active_node;
>   #define i915_active_may_sleep(fn) ptr_pack_bits(&(fn), I915_ACTIVE_MAY_SLEEP, 2)
>   
>   struct i915_active {
> -	struct drm_i915_private *i915;
> +	atomic_t count;
> +	struct mutex mutex;
>   
>   	struct active_node *cache;
>   	struct rb_root tree;
> -	struct mutex mutex;
> -	atomic_t count;
>   
>   	/* Preallocated "exclusive" node */
> -	struct dma_fence __rcu *excl;
> -	struct dma_fence_cb excl_cb;
> +	struct i915_active_fence excl;
>   
>   	unsigned long flags;
>   #define I915_ACTIVE_RETIRE_SLEEPS BIT(0)
> -#define I915_ACTIVE_GRAB_BIT 1
>   
>   	int (*active)(struct i915_active *ref);
>   	void (*retire)(struct i915_active *ref);
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 3eed2efa8d13..1aadab1cdd24 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -900,28 +900,38 @@ wait_for_timelines(struct drm_i915_private *i915,
>   
>   	spin_lock_irqsave(&timelines->lock, flags);
>   	list_for_each_entry(tl, &timelines->active_list, link) {
> -		struct i915_request *rq;
> +		struct dma_fence *fence;
>   
> -		rq = i915_active_request_get_unlocked(&tl->last_request);
> -		if (!rq)
> +		fence = i915_active_fence_get(&tl->last_request);
> +		if (!fence)
>   			continue;
>   
>   		spin_unlock_irqrestore(&timelines->lock, flags);
>   
> -		/*
> -		 * "Race-to-idle".
> -		 *
> -		 * Switching to the kernel context is often used a synchronous
> -		 * step prior to idling, e.g. in suspend for flushing all
> -		 * current operations to memory before sleeping. These we
> -		 * want to complete as quickly as possible to avoid prolonged
> -		 * stalls, so allow the gpu to boost to maximum clocks.
> -		 */
> -		if (wait & I915_WAIT_FOR_IDLE_BOOST)
> -			gen6_rps_boost(rq);
> +		if (!dma_fence_is_i915(fence)) {
> +			timeout = dma_fence_wait_timeout(fence,
> +							 flags & I915_WAIT_INTERRUPTIBLE,
> +							 timeout);

We did not have support for non-i915 fences here before this patch, right? 
If so, would it be better to split that part out into a separate patch?

> +		} else {
> +			struct i915_request *rq = to_request(fence);
> +
> +			/*
> +			 * "Race-to-idle".
> +			 *
> +			 * Switching to the kernel context is often used as
> +			 * a synchronous step prior to idling, e.g. in suspend
> +			 * for flushing all current operations to memory before
> +			 * sleeping. These we want to complete as quickly as
> +			 * possible to avoid prolonged stalls, so allow the gpu
> +			 * to boost to maximum clocks.
> +			 */
> +			if (flags & I915_WAIT_FOR_IDLE_BOOST)
> +				gen6_rps_boost(rq);
> +
> +			timeout = i915_request_wait(rq, flags, timeout);
> +		}
>   
> -		timeout = i915_request_wait(rq, wait, timeout);
> -		i915_request_put(rq);
> +		dma_fence_put(fence);
>   		if (timeout < 0)
>   			return timeout;
>   
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index b2e4827788fc..60676de059a7 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -1858,7 +1858,6 @@ static const struct i915_vma_ops pd_vma_ops = {
>   
>   static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
>   {
> -	struct drm_i915_private *i915 = ppgtt->base.vm.i915;
>   	struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt;
>   	struct i915_vma *vma;
>   
> @@ -1869,7 +1868,7 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
>   	if (!vma)
>   		return ERR_PTR(-ENOMEM);
>   
> -	i915_active_init(i915, &vma->active, NULL, NULL);
> +	i915_active_init(&vma->active, NULL, NULL);
>   
>   	mutex_init(&vma->pages_mutex);
>   	vma->vm = i915_vm_get(&ggtt->vm);
> diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
> index 3ccf7fd9307f..2c0071ff2d2d 100644
> --- a/drivers/gpu/drm/i915/i915_gpu_error.c
> +++ b/drivers/gpu/drm/i915/i915_gpu_error.c
> @@ -1292,7 +1292,7 @@ capture_vma(struct capture_vma *next,
>   	if (!c)
>   		return next;
>   
> -	if (!i915_active_trygrab(&vma->active)) {
> +	if (!i915_active_acquire_if_busy(&vma->active)) {
>   		kfree(c);
>   		return next;
>   	}
> @@ -1432,7 +1432,7 @@ gem_record_rings(struct i915_gpu_state *error, struct compress *compress)
>   			*this->slot =
>   				i915_error_object_create(i915, vma, compress);
>   
> -			i915_active_ungrab(&vma->active);
> +			i915_active_release(&vma->active);
>   			i915_vma_put(vma);
>   
>   			capture = this->next;
> diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> index 754a78364a63..4ecfae143276 100644
> --- a/drivers/gpu/drm/i915/i915_request.c
> +++ b/drivers/gpu/drm/i915/i915_request.c
> @@ -197,9 +197,8 @@ static void free_capture_list(struct i915_request *request)
>   
>   static bool i915_request_retire(struct i915_request *rq)
>   {
> -	struct i915_active_request *active, *next;
> -
>   	lockdep_assert_held(&rq->timeline->mutex);
> +
>   	if (!i915_request_completed(rq))
>   		return false;
>   
> @@ -223,35 +222,6 @@ static bool i915_request_retire(struct i915_request *rq)
>   	GEM_BUG_ON(!list_is_first(&rq->link, &rq->timeline->requests));
>   	rq->ring->head = rq->postfix;
>   
> -	/*
> -	 * Walk through the active list, calling retire on each. This allows
> -	 * objects to track their GPU activity and mark themselves as idle
> -	 * when their *last* active request is completed (updating state
> -	 * tracking lists for eviction, active references for GEM, etc).
> -	 *
> -	 * As the ->retire() may free the node, we decouple it first and
> -	 * pass along the auxiliary information (to avoid dereferencing
> -	 * the node after the callback).
> -	 */
> -	list_for_each_entry_safe(active, next, &rq->active_list, link) {
> -		/*
> -		 * In microbenchmarks or focusing upon time inside the kernel,
> -		 * we may spend an inordinate amount of time simply handling
> -		 * the retirement of requests and processing their callbacks.
> -		 * Of which, this loop itself is particularly hot due to the
> -		 * cache misses when jumping around the list of
> -		 * i915_active_request.  So we try to keep this loop as
> -		 * streamlined as possible and also prefetch the next
> -		 * i915_active_request to try and hide the likely cache miss.
> -		 */
> -		prefetchw(next);
> -
> -		INIT_LIST_HEAD(&active->link);
> -		RCU_INIT_POINTER(active->request, NULL);
> -
> -		active->retire(active, rq);
> -	}
> -
>   	local_irq_disable();
>   
>   	/*
> @@ -664,7 +634,6 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
>   	rq->flags = 0;
>   	rq->execution_mask = ALL_ENGINES;
>   
> -	INIT_LIST_HEAD(&rq->active_list);
>   	INIT_LIST_HEAD(&rq->execute_cb);
>   
>   	/*
> @@ -703,7 +672,6 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
>   	ce->ring->emit = rq->head;
>   
>   	/* Make sure we didn't add ourselves to external state before freeing */
> -	GEM_BUG_ON(!list_empty(&rq->active_list));
>   	GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
>   	GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
>   
> @@ -1096,8 +1064,8 @@ __i915_request_add_to_timeline(struct i915_request *rq)
>   	 * precludes optimising to use semaphores serialisation of a single
>   	 * timeline across engines.
>   	 */
> -	prev = rcu_dereference_protected(timeline->last_request.request,
> -					 lockdep_is_held(&timeline->mutex));
> +	prev = to_request(__i915_active_fence_set(&timeline->last_request,
> +						  &rq->fence));
>   	if (prev && !i915_request_completed(prev)) {
>   		if (is_power_of_2(prev->engine->mask | rq->engine->mask))
>   			i915_sw_fence_await_sw_fence(&rq->submit,
> @@ -1122,7 +1090,6 @@ __i915_request_add_to_timeline(struct i915_request *rq)
>   	 * us, the timeline will hold its seqno which is later than ours.
>   	 */
>   	GEM_BUG_ON(timeline->seqno != rq->fence.seqno);
> -	__i915_active_request_set(&timeline->last_request, rq);
>   
>   	return prev;
>   }
> diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
> index 8ac6e1226a56..3251d2bdbeea 100644
> --- a/drivers/gpu/drm/i915/i915_request.h
> +++ b/drivers/gpu/drm/i915/i915_request.h
> @@ -211,7 +211,6 @@ struct i915_request {
>   	 * on the active_list (of their final request).
>   	 */
>   	struct i915_capture_list *capture_list;
> -	struct list_head active_list;
>   
>   	/** Time at which this request was emitted, in jiffies. */
>   	unsigned long emitted_jiffies;
> diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
> index 5ec8c970f9a2..2c703fd3c2b9 100644
> --- a/drivers/gpu/drm/i915/i915_vma.c
> +++ b/drivers/gpu/drm/i915/i915_vma.c
> @@ -120,8 +120,7 @@ vma_create(struct drm_i915_gem_object *obj,
>   	vma->size = obj->base.size;
>   	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
>   
> -	i915_active_init(vm->i915, &vma->active,
> -			 __i915_vma_active, __i915_vma_retire);
> +	i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire);
>   
>   	/* Declare ourselves safe for use inside shrinkers */
>   	if (IS_ENABLED(CONFIG_LOCKDEP)) {
> @@ -1100,7 +1099,7 @@ int i915_vma_move_to_active(struct i915_vma *vma,
>   	 * add the active reference first and queue for it to be dropped
>   	 * *last*.
>   	 */
> -	err = i915_active_ref(&vma->active, rq->timeline, rq);
> +	err = i915_active_ref(&vma->active, rq->timeline, &rq->fence);
>   	if (unlikely(err))
>   		return err;
>   
> @@ -1108,7 +1107,7 @@ int i915_vma_move_to_active(struct i915_vma *vma,
>   		if (intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CS))
>   			i915_active_ref(&obj->frontbuffer->write,
>   					rq->timeline,
> -					rq);
> +					&rq->fence);
>   
>   		dma_resv_add_excl_fence(vma->resv, &rq->fence);
>   		obj->write_domain = I915_GEM_DOMAIN_RENDER;
> @@ -1146,6 +1145,7 @@ int i915_vma_unbind(struct i915_vma *vma)
>   	if (ret)
>   		return ret;
>   
> +	GEM_BUG_ON(i915_vma_is_active(vma));
>   	if (i915_vma_is_pinned(vma)) {
>   		vma_print_allocator(vma, "is pinned");
>   		return -EBUSY;
> diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
> index d5ac9944d093..af5827aac7b2 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_active.c
> +++ b/drivers/gpu/drm/i915/selftests/i915_active.c
> @@ -68,7 +68,7 @@ static struct live_active *__live_alloc(struct drm_i915_private *i915)
>   		return NULL;
>   
>   	kref_init(&active->ref);
> -	i915_active_init(i915, &active->base, __live_active, __live_retire);
> +	i915_active_init(&active->base, __live_active, __live_retire);
>   
>   	return active;
>   }
> @@ -110,7 +110,9 @@ __live_active_setup(struct drm_i915_private *i915)
>   						       submit,
>   						       GFP_KERNEL);
>   		if (err >= 0)
> -			err = i915_active_ref(&active->base, rq->timeline, rq);
> +			err = i915_active_ref(&active->base,
> +					      rq->timeline,
> +					      &rq->fence);
>   		i915_request_add(rq);
>   		if (err) {
>   			pr_err("Failed to track active ref!\n");
> @@ -146,19 +148,13 @@ static int live_active_wait(void *arg)
>   {
>   	struct drm_i915_private *i915 = arg;
>   	struct live_active *active;
> -	intel_wakeref_t wakeref;
>   	int err = 0;
>   
>   	/* Check that we get a callback when requests retire upon waiting */
>   
> -	mutex_lock(&i915->drm.struct_mutex);
> -	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
> -
>   	active = __live_active_setup(i915);
> -	if (IS_ERR(active)) {
> -		err = PTR_ERR(active);
> -		goto err;
> -	}
> +	if (IS_ERR(active))
> +		return PTR_ERR(active);
>   
>   	i915_active_wait(&active->base);
>   	if (!READ_ONCE(active->retired)) {
> @@ -168,11 +164,9 @@ static int live_active_wait(void *arg)
>   
>   	__live_put(active);
>   
> +	mutex_lock(&i915->drm.struct_mutex);
>   	if (igt_flush_test(i915, I915_WAIT_LOCKED))
>   		err = -EIO;
> -
> -err:
> -	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
>   	mutex_unlock(&i915->drm.struct_mutex);
>   
>   	return err;
> @@ -182,23 +176,19 @@ static int live_active_retire(void *arg)
>   {
>   	struct drm_i915_private *i915 = arg;
>   	struct live_active *active;
> -	intel_wakeref_t wakeref;
>   	int err = 0;
>   
>   	/* Check that we get a callback when requests are indirectly retired */
>   
> -	mutex_lock(&i915->drm.struct_mutex);
> -	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
> -
>   	active = __live_active_setup(i915);
> -	if (IS_ERR(active)) {
> -		err = PTR_ERR(active);
> -		goto err;
> -	}
> +	if (IS_ERR(active))
> +		return PTR_ERR(active);
>   
>   	/* waits for & retires all requests */
> +	mutex_lock(&i915->drm.struct_mutex);
>   	if (igt_flush_test(i915, I915_WAIT_LOCKED))
>   		err = -EIO;
> +	mutex_unlock(&i915->drm.struct_mutex);
>   
>   	if (!READ_ONCE(active->retired)) {
>   		pr_err("i915_active not retired after flushing!\n");
> @@ -207,10 +197,6 @@ static int live_active_retire(void *arg)
>   
>   	__live_put(active);
>   
> -err:
> -	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
> -	mutex_unlock(&i915->drm.struct_mutex);
> -
>   	return err;
>   }
>   
> 

I can't really grasp the whole picture from the diff alone; I will need to
apply it and go through the code again. It removes a lot of code, so bonus
points for that. :)
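
For my own notes, the calling pattern I think I am seeing for users of the
new API is roughly the following (purely my sketch stitched together from
calls visible in this diff, not code lifted from the patch):

	/* one-time setup; active/retire callbacks no longer rely on
	 * struct_mutex but on i915_active's own mutex */
	i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire);

	/* per-request tracking now takes a dma_fence and is serialised
	 * by the caller's timeline mutex */
	err = i915_active_ref(&vma->active, rq->timeline, &rq->fence);

	/* peek at activity without struct_mutex, e.g. for error capture */
	if (i915_active_acquire_if_busy(&vma->active)) {
		/* ... inspect the vma while it is kept busy ... */
		i915_active_release(&vma->active);
	}

	/* wait for all tracked fences and run the retire callback */
	i915_active_wait(&vma->active);

Please correct me if that reading is off.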

Regards,

Tvrtko

