[PATCH 4/5] drm/i915: Remove duplicate of i915_active in intel_context

Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Tue Jul 6 10:28:15 UTC 2021


Make intel_context_pin() mean the same as acquiring the context's
i915_active, and intel_context_unpin() the same as retiring it. This
removes the second layer of pinning (ce->pin_count guarded by
ce->pin_mutex) and hopefully makes the code slightly cleaner.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
---
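Note for reviewers: as an illustration of the idea only (not part of
the patch), below is a toy userspace model of the change; every name
in it (toy_active, toy_context, ...) is made up for this sketch.
Instead of keeping a separate pin_count on top of the active tracker,
pin/unpin map directly onto acquiring/releasing that tracker, with the
HW hooks running on the first acquire and the last release.

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_active {
	atomic_int count;	/* stands in for the i915_active use count */
};

struct toy_context {
	struct toy_active active;
	bool hw_pinned;		/* stands in for the ops->pin()/unpin() HW state */
};

static void toy_context_pin(struct toy_context *ce)
{
	/* "pin" is just the first acquire of the active tracker */
	if (atomic_fetch_add(&ce->active.count, 1) == 0)
		ce->hw_pinned = true;	/* ops->pin() runs on the 0 -> 1 edge */
}

static void toy_context_unpin(struct toy_context *ce)
{
	/* ... and "unpin"/retire is the last release */
	if (atomic_fetch_sub(&ce->active.count, 1) == 1)
		ce->hw_pinned = false;	/* ops->unpin() runs on the 1 -> 0 edge */
}

int main(void)
{
	struct toy_context ce = { .active = { 0 }, .hw_pinned = false };

	toy_context_pin(&ce);
	assert(ce.hw_pinned);		/* pinned while the tracker is busy */

	toy_context_pin(&ce);		/* nested pins only bump the same count */
	toy_context_unpin(&ce);
	assert(ce.hw_pinned);

	toy_context_unpin(&ce);
	assert(!ce.hw_pinned);		/* "retired" once the tracker goes idle */

	printf("single-tracker pin/unpin model OK\n");
	return 0;
}

In the real patch the first-acquire edge corresponds to
__intel_context_active() calling ce->ops->pin(), and retirement in
__intel_context_retire() now calls ce->ops->unpin().
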
 drivers/gpu/drm/i915/gt/intel_context.c       | 61 +++++---------
 drivers/gpu/drm/i915/gt/intel_context.h       | 26 +++---
 drivers/gpu/drm/i915/gt/intel_context_sseu.c  |  2 +-
 drivers/gpu/drm/i915/gt/intel_context_types.h |  5 +-
 .../drm/i915/gt/intel_execlists_submission.c  |  3 +-
 drivers/gpu/drm/i915/gt/selftest_execlists.c  | 79 -------------------
 6 files changed, 41 insertions(+), 135 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 156db93de426..b5f7c4a80c3e 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -53,7 +53,7 @@ int intel_context_alloc_state(struct intel_context *ce)
 {
 	int err = 0;
 
-	if (mutex_lock_interruptible(&ce->pin_mutex))
+	if (mutex_lock_interruptible(&ce->active.mutex))
 		return -EINTR;
 
 	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
@@ -70,7 +70,7 @@ int intel_context_alloc_state(struct intel_context *ce)
 	}
 
 unlock:
-	mutex_unlock(&ce->pin_mutex);
+	mutex_unlock(&ce->active.mutex);
 	return err;
 }
 
@@ -203,7 +203,6 @@ static void intel_context_post_unpin(struct intel_context *ce)
 int __intel_context_do_pin_ww(struct intel_context *ce,
 			      struct i915_gem_ww_ctx *ww)
 {
-	void *vaddr;
 	int err = 0;
 
 	if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
@@ -228,50 +227,29 @@ int __intel_context_do_pin_ww(struct intel_context *ce,
 	if (err)
 		return err;
 
-	err = i915_active_acquire(&ce->active);
+	err = ce->ops->pre_pin(ce, ww, &ce->vaddr);
 	if (err)
 		goto err_ctx_unpin;
 
-	err = ce->ops->pre_pin(ce, ww, &vaddr);
-	if (err)
-		goto err_release;
-
-	err = mutex_lock_interruptible(&ce->pin_mutex);
+	err = i915_active_acquire(&ce->active);
 	if (err)
 		goto err_post_unpin;
 
 	if (unlikely(intel_context_is_closed(ce))) {
 		err = -ENOENT;
-		goto err_unlock;
+		goto err_release;
 	}
 
-	if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
-		err = intel_context_active_acquire(ce);
-		if (unlikely(err))
-			goto err_unlock;
-
-		err = ce->ops->pin(ce, vaddr);
-		if (err) {
-			intel_context_active_release(ce);
-			goto err_unlock;
-		}
-
-		CE_TRACE(ce, "pin ring:{start:%08x, head:%04x, tail:%04x}\n",
-			 i915_ggtt_offset(ce->ring->vma),
-			 ce->ring->head, ce->ring->tail);
-
-		smp_mb__before_atomic(); /* flush pin before it is visible */
-		atomic_inc(&ce->pin_count);
-	}
+	err = intel_context_active_acquire(ce);
+	if (unlikely(err))
+		goto err_release;
 
 	GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */
 
-err_unlock:
-	mutex_unlock(&ce->pin_mutex);
-err_post_unpin:
-	ce->ops->post_unpin(ce);
 err_release:
 	i915_active_release(&ce->active);
+err_post_unpin:
+	ce->ops->post_unpin(ce);
 err_ctx_unpin:
 	intel_context_post_unpin(ce);
 
@@ -305,11 +283,7 @@ int __intel_context_do_pin(struct intel_context *ce)
 
 void intel_context_unpin(struct intel_context *ce)
 {
-	if (!atomic_dec_and_test(&ce->pin_count))
-		return;
-
 	CE_TRACE(ce, "unpin\n");
-	ce->ops->unpin(ce);
 
 	/*
 	 * Once released, we may asynchronously drop the active reference.
@@ -317,9 +291,7 @@ void intel_context_unpin(struct intel_context *ce)
 	 * take an extra now so that it is not freed before we finish
 	 * dereferencing it.
 	 */
-	intel_context_get(ce);
 	intel_context_active_release(ce);
-	intel_context_put(ce);
 }
 
 static void __intel_context_retire(struct i915_active *active)
@@ -330,6 +302,7 @@ static void __intel_context_retire(struct i915_active *active)
 		 intel_context_get_total_runtime_ns(ce),
 		 intel_context_get_avg_runtime_ns(ce));
 
+	ce->ops->unpin(ce);
 	set_bit(CONTEXT_VALID_BIT, &ce->flags);
 	intel_context_post_unpin(ce);
 	intel_context_put(ce);
@@ -338,6 +311,15 @@ static void __intel_context_retire(struct i915_active *active)
 static int __intel_context_active(struct i915_active *active)
 {
 	struct intel_context *ce = container_of(active, typeof(*ce), active);
+	int err;
+
+	err = ce->ops->pin(ce, ce->vaddr);
+	if (err)
+		return err;
+
+	CE_TRACE(ce, "pin ring:{start:%08x, head:%04x, tail:%04x}\n",
+		 i915_ggtt_offset(ce->ring->vma),
+		 ce->ring->head, ce->ring->tail);
 
 	intel_context_get(ce);
 
@@ -377,8 +359,6 @@ intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
 	spin_lock_init(&ce->signal_lock);
 	INIT_LIST_HEAD(&ce->signals);
 
-	mutex_init(&ce->pin_mutex);
-
 	i915_active_init(&ce->active,
 			 __intel_context_active, __intel_context_retire, 0);
 }
@@ -389,7 +369,6 @@ void intel_context_fini(struct intel_context *ce)
 		intel_timeline_put(ce->timeline);
 	i915_vm_put(ce->vm);
 
-	mutex_destroy(&ce->pin_mutex);
 	i915_active_fini(&ce->active);
 }
 
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index f83a73a2b39f..9bed8cd387d5 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -49,9 +49,9 @@ int intel_context_reconfigure_sseu(struct intel_context *ce,
  * intel_context_is_pinned() remains stable.
  */
 static inline int intel_context_lock_pinned(struct intel_context *ce)
-	__acquires(ce->pin_mutex)
+	__acquires(ce->active.mutex)
 {
-	return mutex_lock_interruptible(&ce->pin_mutex);
+	return mutex_lock_interruptible(&ce->active.mutex);
 }
 
 /**
@@ -66,7 +66,7 @@ static inline int intel_context_lock_pinned(struct intel_context *ce)
 static inline bool
 intel_context_is_pinned(struct intel_context *ce)
 {
-	return atomic_read(&ce->pin_count);
+	return !i915_active_is_idle(&ce->active);
 }
 
 /**
@@ -76,9 +76,9 @@ intel_context_is_pinned(struct intel_context *ce)
  * Releases the lock earlier acquired by intel_context_unlock_pinned().
  */
 static inline void intel_context_unlock_pinned(struct intel_context *ce)
-	__releases(ce->pin_mutex)
+	__releases(ce->active.mutex)
 {
-	mutex_unlock(&ce->pin_mutex);
+	mutex_unlock(&ce->active.mutex);
 }
 
 int __intel_context_do_pin(struct intel_context *ce);
@@ -87,13 +87,20 @@ int __intel_context_do_pin_ww(struct intel_context *ce,
 
 static inline bool intel_context_pin_if_active(struct intel_context *ce)
 {
-	return atomic_inc_not_zero(&ce->pin_count);
+	return i915_active_acquire_if_busy(&ce->active);
 }
 
 static inline int intel_context_pin(struct intel_context *ce)
 {
-	if (likely(intel_context_pin_if_active(ce)))
-		return 0;
+	if (likely(intel_context_pin_if_active(ce))) {
+		int err = i915_active_acquire_preallocate_barrier(&ce->active,
+								  ce->engine);
+
+		if (err)
+			i915_active_release(&ce->active);
+
+		return err;
+	}
 
 	return __intel_context_do_pin(ce);
 }
@@ -110,11 +117,10 @@ static inline int intel_context_pin_ww(struct intel_context *ce,
 static inline void __intel_context_pin(struct intel_context *ce)
 {
 	GEM_BUG_ON(!intel_context_is_pinned(ce));
-	atomic_inc(&ce->pin_count);
+	__i915_active_acquire(&ce->active);
 }
 
 void intel_context_unpin(struct intel_context *ce);
-
 void intel_context_enter_engine(struct intel_context *ce);
 void intel_context_exit_engine(struct intel_context *ce);
 
diff --git a/drivers/gpu/drm/i915/gt/intel_context_sseu.c b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
index e86d8255feec..2b72a8c15b7c 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
@@ -42,7 +42,7 @@ gen8_modify_rpcs(struct intel_context *ce, const struct intel_sseu sseu)
 	struct i915_request *rq;
 	int ret;
 
-	lockdep_assert_held(&ce->pin_mutex);
+	lockdep_assert_held(&ce->active.mutex);
 
 	/*
 	 * If the context is not idle, we have to submit an ordered request to
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index ed8c447a7346..fd72e8517523 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -121,15 +121,14 @@ struct intel_context {
 
 	unsigned int active_count; /* protected by timeline->mutex */
 
-	atomic_t pin_count;
-	struct mutex pin_mutex; /* guards pinning and associated on-gpuing */
-
 	/**
 	 * active: Active tracker for the rq activity (inc. external) on this
 	 * intel_context object.
 	 */
 	struct i915_active active;
 
+	void *vaddr;
+
 	const struct intel_context_ops *ops;
 
 	/** sseu: Control eu/slice partitioning */
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index cdb2126a159a..133a592aa961 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -668,7 +668,8 @@ static u64 execlists_update_context(struct i915_request *rq)
 	 * HW has a tendency to ignore us rewinding the TAIL to the end of
 	 * an earlier request.
 	 */
-	GEM_BUG_ON(ce->lrc_reg_state[CTX_RING_TAIL] != rq->ring->tail);
+	CE_TRACE(ce, "expected %x actual %x\n", ce->lrc_reg_state[CTX_RING_TAIL], rq->ring->tail);
+	GEM_WARN_ON(ce->lrc_reg_state[CTX_RING_TAIL] != rq->ring->tail);
 	prev = rq->ring->tail;
 	tail = intel_ring_set_tail(rq->ring, rq->tail);
 	if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0))
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index 08896ae027d5..63a73c1863a8 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -473,84 +473,6 @@ static int live_unlite_ring(void *arg)
 	return err;
 }
 
-static int live_pin_rewind(void *arg)
-{
-	struct intel_gt *gt = arg;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	int err = 0;
-
-	/*
-	 * We have to be careful not to trust intel_ring too much, for example
-	 * ring->head is updated upon retire which is out of sync with pinning
-	 * the context. Thus we cannot use ring->head to set CTX_RING_HEAD,
-	 * or else we risk writing an older, stale value.
-	 *
-	 * To simulate this, let's apply a bit of deliberate sabotague.
-	 */
-
-	for_each_engine(engine, gt, id) {
-		struct intel_context *ce;
-		struct i915_request *rq;
-		struct intel_ring *ring;
-		struct igt_live_test t;
-
-		if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
-			err = -EIO;
-			break;
-		}
-
-		ce = intel_context_create(engine);
-		if (IS_ERR(ce)) {
-			err = PTR_ERR(ce);
-			break;
-		}
-
-		err = intel_context_pin(ce);
-		if (err) {
-			intel_context_put(ce);
-			break;
-		}
-
-		/* Keep the context awake while we play games */
-		err = i915_active_acquire(&ce->active);
-		if (err) {
-			intel_context_unpin(ce);
-			intel_context_put(ce);
-			break;
-		}
-		ring = ce->ring;
-
-		/* Poison the ring, and offset the next request from HEAD */
-		memset32(ring->vaddr, STACK_MAGIC, ring->size / sizeof(u32));
-		ring->emit = ring->size / 2;
-		ring->tail = ring->emit;
-		GEM_BUG_ON(ring->head);
-
-		intel_context_unpin(ce);
-
-		/* Submit a simple nop request */
-		GEM_BUG_ON(intel_context_is_pinned(ce));
-		rq = intel_context_create_request(ce);
-		i915_active_release(&ce->active); /* e.g. async retire */
-		intel_context_put(ce);
-		if (IS_ERR(rq)) {
-			err = PTR_ERR(rq);
-			break;
-		}
-		GEM_BUG_ON(!rq->head);
-		i915_request_add(rq);
-
-		/* Expect not to hang! */
-		if (igt_live_test_end(&t)) {
-			err = -EIO;
-			break;
-		}
-	}
-
-	return err;
-}
-
 static int engine_lock_reset_tasklet(struct intel_engine_cs *engine)
 {
 	tasklet_disable(&engine->sched_engine->tasklet);
@@ -4698,7 +4620,6 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(live_unlite_switch),
 		SUBTEST(live_unlite_preempt),
 		SUBTEST(live_unlite_ring),
-		SUBTEST(live_pin_rewind),
 		SUBTEST(live_hold_reset),
 		SUBTEST(live_error_interrupt),
 		SUBTEST(live_timeslice_preempt),
-- 
2.31.0


