[PATCH 4/5] drm/i915: Remove duplicate of i915_active in intel_context, v2.

Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Wed Jul 7 11:56:22 UTC 2021


Make intel_context_pin() mean the same as acquiring the context's
i915_active, and intel_context_unpin() the same as retiring it:
ce->ops->pin() moves into the activation callback
__intel_context_active() and ce->ops->unpin() into the retirement
callback __intel_context_retire(), so pin_count and pin_mutex can be
dropped in favour of ce->active. The barrier preallocation previously
done at pin time moves into intel_context_enter(), which can now fail
and therefore returns an error. This removes the second layer of
pinning and hopefully makes the code slightly cleaner.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
---
 drivers/gpu/drm/i915/gt/intel_context.c       | 93 ++++---------------
 drivers/gpu/drm/i915/gt/intel_context.h       | 45 +++++----
 drivers/gpu/drm/i915/gt/intel_context_sseu.c  |  2 +-
 drivers/gpu/drm/i915/gt/intel_context_types.h |  5 +-
 .../drm/i915/gt/intel_execlists_submission.c  |  3 +-
 drivers/gpu/drm/i915/gt/selftest_execlists.c  | 79 ----------------
 drivers/gpu/drm/i915/i915_active.c            |  4 +-
 drivers/gpu/drm/i915/i915_request.c           |  8 +-
 8 files changed, 62 insertions(+), 177 deletions(-)
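
For reference, a condensed sketch of the pin/active lifecycle this
patch establishes (illustrative only, not an applyable hunk; ww
handling, locking and most error paths elided):

	/* Pinning is now just acquiring the context's i915_active. */
	int __intel_context_do_pin_ww(struct intel_context *ce,
				      struct i915_gem_ww_ctx *ww)
	{
		err = ce->ops->pre_pin(ce, ww, &ce->vaddr); /* map state */
		err = i915_active_acquire(&ce->active); /* first acquire
							  * triggers the
							  * activate callback */
		...
	}

	/* The first activation performs the actual pin... */
	static int __intel_context_active(struct i915_active *active)
	{
		struct intel_context *ce =
			container_of(active, typeof(*ce), active);

		return ce->ops->pin(ce, ce->vaddr);
	}

	/* ...and retirement, reached via intel_context_unpin(),
	 * undoes it. */
	static void __intel_context_retire(struct i915_active *active)
	{
		struct intel_context *ce =
			container_of(active, typeof(*ce), active);

		ce->ops->unpin(ce);
		intel_context_post_unpin(ce);
	}

	/* Barrier preallocation moves from pin time into
	 * intel_context_enter(), which can now fail;
	 * intel_context_exit() applies the barriers. */
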

diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 156db93de426..64cdbc04411e 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -53,7 +53,7 @@ int intel_context_alloc_state(struct intel_context *ce)
 {
 	int err = 0;
 
-	if (mutex_lock_interruptible(&ce->pin_mutex))
+	if (mutex_lock_interruptible(&ce->active.mutex))
 		return -EINTR;
 
 	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
@@ -70,35 +70,10 @@ int intel_context_alloc_state(struct intel_context *ce)
 	}
 
 unlock:
-	mutex_unlock(&ce->pin_mutex);
+	mutex_unlock(&ce->active.mutex);
 	return err;
 }
 
-static int intel_context_active_acquire(struct intel_context *ce)
-{
-	int err;
-
-	__i915_active_acquire(&ce->active);
-
-	if (intel_context_is_barrier(ce))
-		return 0;
-
-	/* Preallocate tracking nodes */
-	err = i915_active_acquire_preallocate_barrier(&ce->active,
-						      ce->engine);
-	if (err)
-		i915_active_release(&ce->active);
-
-	return err;
-}
-
-static void intel_context_active_release(struct intel_context *ce)
-{
-	/* Nodes preallocated in intel_context_active() */
-	i915_active_acquire_barrier(&ce->active);
-	i915_active_release(&ce->active);
-}
-
 static int __context_pin_state(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
 {
 	unsigned int bias = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
@@ -203,7 +178,6 @@ static void intel_context_post_unpin(struct intel_context *ce)
 int __intel_context_do_pin_ww(struct intel_context *ce,
 			      struct i915_gem_ww_ctx *ww)
 {
-	void *vaddr;
 	int err = 0;
 
 	if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
@@ -228,50 +202,26 @@ int __intel_context_do_pin_ww(struct intel_context *ce,
 	if (err)
 		return err;
 
-	err = i915_active_acquire(&ce->active);
+	err = ce->ops->pre_pin(ce, ww, &ce->vaddr);
 	if (err)
 		goto err_ctx_unpin;
 
-	err = ce->ops->pre_pin(ce, ww, &vaddr);
-	if (err)
-		goto err_release;
-
-	err = mutex_lock_interruptible(&ce->pin_mutex);
+	err = i915_active_acquire(&ce->active);
 	if (err)
 		goto err_post_unpin;
 
 	if (unlikely(intel_context_is_closed(ce))) {
 		err = -ENOENT;
-		goto err_unlock;
-	}
-
-	if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
-		err = intel_context_active_acquire(ce);
-		if (unlikely(err))
-			goto err_unlock;
-
-		err = ce->ops->pin(ce, vaddr);
-		if (err) {
-			intel_context_active_release(ce);
-			goto err_unlock;
-		}
-
-		CE_TRACE(ce, "pin ring:{start:%08x, head:%04x, tail:%04x}\n",
-			 i915_ggtt_offset(ce->ring->vma),
-			 ce->ring->head, ce->ring->tail);
-
-		smp_mb__before_atomic(); /* flush pin before it is visible */
-		atomic_inc(&ce->pin_count);
+		goto err_release;
 	}
 
 	GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */
+	goto err_post_unpin;
 
-err_unlock:
-	mutex_unlock(&ce->pin_mutex);
-err_post_unpin:
-	ce->ops->post_unpin(ce);
 err_release:
 	i915_active_release(&ce->active);
+err_post_unpin:
+	ce->ops->post_unpin(ce);
 err_ctx_unpin:
 	intel_context_post_unpin(ce);
 
@@ -305,21 +255,9 @@ int __intel_context_do_pin(struct intel_context *ce)
 
 void intel_context_unpin(struct intel_context *ce)
 {
-	if (!atomic_dec_and_test(&ce->pin_count))
-		return;
-
 	CE_TRACE(ce, "unpin\n");
-	ce->ops->unpin(ce);
 
-	/*
-	 * Once released, we may asynchronously drop the active reference.
-	 * As that may be the only reference keeping the context alive,
-	 * take an extra now so that it is not freed before we finish
-	 * dereferencing it.
-	 */
-	intel_context_get(ce);
-	intel_context_active_release(ce);
-	intel_context_put(ce);
+	i915_active_release(&ce->active);
 }
 
 static void __intel_context_retire(struct i915_active *active)
@@ -330,6 +268,7 @@ static void __intel_context_retire(struct i915_active *active)
 		 intel_context_get_total_runtime_ns(ce),
 		 intel_context_get_avg_runtime_ns(ce));
 
+	ce->ops->unpin(ce);
 	set_bit(CONTEXT_VALID_BIT, &ce->flags);
 	intel_context_post_unpin(ce);
 	intel_context_put(ce);
@@ -338,6 +277,15 @@ static void __intel_context_retire(struct i915_active *active)
 static int __intel_context_active(struct i915_active *active)
 {
 	struct intel_context *ce = container_of(active, typeof(*ce), active);
+	int err;
+
+	err = ce->ops->pin(ce, ce->vaddr);
+	if (err)
+		return err;
+
+	CE_TRACE(ce, "pin ring:{start:%08x, head:%04x, tail:%04x}\n",
+		 i915_ggtt_offset(ce->ring->vma),
+		 ce->ring->head, ce->ring->tail);
 
 	intel_context_get(ce);
 
@@ -377,8 +325,6 @@ intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
 	spin_lock_init(&ce->signal_lock);
 	INIT_LIST_HEAD(&ce->signals);
 
-	mutex_init(&ce->pin_mutex);
-
 	i915_active_init(&ce->active,
 			 __intel_context_active, __intel_context_retire, 0);
 }
@@ -389,7 +335,6 @@ void intel_context_fini(struct intel_context *ce)
 		intel_timeline_put(ce->timeline);
 	i915_vm_put(ce->vm);
 
-	mutex_destroy(&ce->pin_mutex);
 	i915_active_fini(&ce->active);
 }
 
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index f83a73a2b39f..29ee043a9d88 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -49,9 +49,9 @@ int intel_context_reconfigure_sseu(struct intel_context *ce,
  * intel_context_is_pinned() remains stable.
  */
 static inline int intel_context_lock_pinned(struct intel_context *ce)
-	__acquires(ce->pin_mutex)
+	__acquires(ce->active.mutex)
 {
-	return mutex_lock_interruptible(&ce->pin_mutex);
+	return mutex_lock_interruptible(&ce->active.mutex);
 }
 
 /**
@@ -66,7 +66,7 @@ static inline int intel_context_lock_pinned(struct intel_context *ce)
 static inline bool
 intel_context_is_pinned(struct intel_context *ce)
 {
-	return atomic_read(&ce->pin_count);
+	return !i915_active_is_idle(&ce->active);
 }
 
 /**
@@ -76,9 +76,9 @@ intel_context_is_pinned(struct intel_context *ce)
  * Releases the lock earlier acquired by intel_context_unlock_pinned().
  */
 static inline void intel_context_unlock_pinned(struct intel_context *ce)
-	__releases(ce->pin_mutex)
+	__releases(ce->active.mutex)
 {
-	mutex_unlock(&ce->pin_mutex);
+	mutex_unlock(&ce->active.mutex);
 }
 
 int __intel_context_do_pin(struct intel_context *ce);
@@ -87,7 +87,7 @@ int __intel_context_do_pin_ww(struct intel_context *ce,
 
 static inline bool intel_context_pin_if_active(struct intel_context *ce)
 {
-	return atomic_inc_not_zero(&ce->pin_count);
+	return i915_active_acquire_if_busy(&ce->active);
 }
 
 static inline int intel_context_pin(struct intel_context *ce)
@@ -110,19 +110,34 @@ static inline int intel_context_pin_ww(struct intel_context *ce,
 static inline void __intel_context_pin(struct intel_context *ce)
 {
 	GEM_BUG_ON(!intel_context_is_pinned(ce));
-	atomic_inc(&ce->pin_count);
+	__i915_active_acquire(&ce->active);
 }
 
 void intel_context_unpin(struct intel_context *ce);
-
 void intel_context_enter_engine(struct intel_context *ce);
 void intel_context_exit_engine(struct intel_context *ce);
 
-static inline void intel_context_enter(struct intel_context *ce)
+static inline bool intel_context_is_barrier(const struct intel_context *ce)
+{
+	return test_bit(CONTEXT_BARRIER_BIT, &ce->flags);
+}
+
+static inline int intel_context_enter(struct intel_context *ce)
 {
 	lockdep_assert_held(&ce->timeline->mutex);
-	if (!ce->active_count++)
+	if (!ce->active_count) {
+		if (!intel_context_is_barrier(ce)) {
+			int ret =
+				i915_active_acquire_preallocate_barrier(&ce->active,
+									ce->engine);
+			if (ret)
+				return ret;
+		}
 		ce->ops->enter(ce);
+	}
+
+	ce->active_count++;
+	return 0;
 }
 
 static inline void intel_context_mark_active(struct intel_context *ce)
@@ -135,8 +150,11 @@ static inline void intel_context_exit(struct intel_context *ce)
 {
 	lockdep_assert_held(&ce->timeline->mutex);
 	GEM_BUG_ON(!ce->active_count);
-	if (!--ce->active_count)
+
+	if (!--ce->active_count) {
 		ce->ops->exit(ce);
+		i915_active_acquire_barrier(&ce->active);
+	}
 }
 
 static inline struct intel_context *intel_context_get(struct intel_context *ce)
@@ -180,11 +198,6 @@ static inline struct intel_ring *__intel_context_ring_size(u64 sz)
 	return u64_to_ptr(struct intel_ring, sz);
 }
 
-static inline bool intel_context_is_barrier(const struct intel_context *ce)
-{
-	return test_bit(CONTEXT_BARRIER_BIT, &ce->flags);
-}
-
 static inline bool intel_context_is_closed(const struct intel_context *ce)
 {
 	return test_bit(CONTEXT_CLOSED_BIT, &ce->flags);
diff --git a/drivers/gpu/drm/i915/gt/intel_context_sseu.c b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
index e86d8255feec..2b72a8c15b7c 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
@@ -42,7 +42,7 @@ gen8_modify_rpcs(struct intel_context *ce, const struct intel_sseu sseu)
 	struct i915_request *rq;
 	int ret;
 
-	lockdep_assert_held(&ce->pin_mutex);
+	lockdep_assert_held(&ce->active.mutex);
 
 	/*
 	 * If the context is not idle, we have to submit an ordered request to
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index ed8c447a7346..fd72e8517523 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -121,15 +121,14 @@ struct intel_context {
 
 	unsigned int active_count; /* protected by timeline->mutex */
 
-	atomic_t pin_count;
-	struct mutex pin_mutex; /* guards pinning and associated on-gpuing */
-
 	/**
 	 * active: Active tracker for the rq activity (inc. external) on this
 	 * intel_context object.
 	 */
 	struct i915_active active;
 
+	void *vaddr;
+
 	const struct intel_context_ops *ops;
 
 	/** sseu: Control eu/slice partitioning */
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index cdb2126a159a..133a592aa961 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -668,7 +668,8 @@ static u64 execlists_update_context(struct i915_request *rq)
 	 * HW has a tendency to ignore us rewinding the TAIL to the end of
 	 * an earlier request.
 	 */
-	GEM_BUG_ON(ce->lrc_reg_state[CTX_RING_TAIL] != rq->ring->tail);
+	CE_TRACE(ce, "expected %x actual %x\n", ce->lrc_reg_state[CTX_RING_TAIL], rq->ring->tail);
+	GEM_WARN_ON(ce->lrc_reg_state[CTX_RING_TAIL] != rq->ring->tail);
 	prev = rq->ring->tail;
 	tail = intel_ring_set_tail(rq->ring, rq->tail);
 	if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0))
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index 08896ae027d5..63a73c1863a8 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -473,84 +473,6 @@ static int live_unlite_ring(void *arg)
 	return err;
 }
 
-static int live_pin_rewind(void *arg)
-{
-	struct intel_gt *gt = arg;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	int err = 0;
-
-	/*
-	 * We have to be careful not to trust intel_ring too much, for example
-	 * ring->head is updated upon retire which is out of sync with pinning
-	 * the context. Thus we cannot use ring->head to set CTX_RING_HEAD,
-	 * or else we risk writing an older, stale value.
-	 *
-	 * To simulate this, let's apply a bit of deliberate sabotague.
-	 */
-
-	for_each_engine(engine, gt, id) {
-		struct intel_context *ce;
-		struct i915_request *rq;
-		struct intel_ring *ring;
-		struct igt_live_test t;
-
-		if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
-			err = -EIO;
-			break;
-		}
-
-		ce = intel_context_create(engine);
-		if (IS_ERR(ce)) {
-			err = PTR_ERR(ce);
-			break;
-		}
-
-		err = intel_context_pin(ce);
-		if (err) {
-			intel_context_put(ce);
-			break;
-		}
-
-		/* Keep the context awake while we play games */
-		err = i915_active_acquire(&ce->active);
-		if (err) {
-			intel_context_unpin(ce);
-			intel_context_put(ce);
-			break;
-		}
-		ring = ce->ring;
-
-		/* Poison the ring, and offset the next request from HEAD */
-		memset32(ring->vaddr, STACK_MAGIC, ring->size / sizeof(u32));
-		ring->emit = ring->size / 2;
-		ring->tail = ring->emit;
-		GEM_BUG_ON(ring->head);
-
-		intel_context_unpin(ce);
-
-		/* Submit a simple nop request */
-		GEM_BUG_ON(intel_context_is_pinned(ce));
-		rq = intel_context_create_request(ce);
-		i915_active_release(&ce->active); /* e.g. async retire */
-		intel_context_put(ce);
-		if (IS_ERR(rq)) {
-			err = PTR_ERR(rq);
-			break;
-		}
-		GEM_BUG_ON(!rq->head);
-		i915_request_add(rq);
-
-		/* Expect not to hang! */
-		if (igt_live_test_end(&t)) {
-			err = -EIO;
-			break;
-		}
-	}
-
-	return err;
-}
-
 static int engine_lock_reset_tasklet(struct intel_engine_cs *engine)
 {
 	tasklet_disable(&engine->sched_engine->tasklet);
@@ -4698,7 +4620,6 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(live_unlite_switch),
 		SUBTEST(live_unlite_preempt),
 		SUBTEST(live_unlite_ring),
-		SUBTEST(live_pin_rewind),
 		SUBTEST(live_hold_reset),
 		SUBTEST(live_error_interrupt),
 		SUBTEST(live_timeslice_preempt),
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index b1aa1c482c32..32b66be92458 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -889,8 +889,8 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 	GEM_BUG_ON(i915_active_is_idle(ref));
 
 	/* Wait until the previous preallocation is completed */
-	while (!llist_empty(&ref->preallocated_barriers))
-		cond_resched();
+	if (WARN_ON(!llist_empty(&ref->preallocated_barriers)))
+		return 0;
 
 	/*
 	 * Preallocate a node for each physical engine supporting the target
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index c5989c0b83d3..ceb6252674ce 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1061,6 +1061,7 @@ i915_request_create(struct intel_context *ce)
 {
 	struct i915_request *rq;
 	struct intel_timeline *tl;
+	int err;
 
 	tl = intel_context_timeline_lock(ce);
 	if (IS_ERR(tl))
@@ -1071,7 +1072,12 @@ i915_request_create(struct intel_context *ce)
 	if (!list_is_last(&rq->link, &tl->requests))
 		i915_request_retire(rq);
 
-	intel_context_enter(ce);
+	err = intel_context_enter(ce);
+	if (err) {
+		rq = ERR_PTR(err);
+		goto err_unlock;
+	}
+
 	rq = __i915_request_create(ce, GFP_KERNEL);
 	intel_context_exit(ce); /* active reference transferred to request */
 	if (IS_ERR(rq))
-- 
2.31.0


