[PATCH 68/75] drm/i915: Replace i915_request.engine with i915_request.sched.engine

Chris Wilson <chris@chris-wilson.co.uk>
Tue Feb 2 13:11:54 UTC 2021


Now that the primary scheduling control is separated from the CS engine,
we want to track the scheduler itself on the request (the request being
the scheduler's unit of work). We have very little use for the physical
engine outside of request submission, so demote the request's engine
backpointer in favour of a scheduler backpointer.
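
A rough sketch of the net effect, trimmed to the fields this patch
touches (the real structures carry many more members, elided here): the
request no longer stores an intel_engine_cs backpointer and instead
reaches its scheduler through the embedded sched node.

	struct i915_sched_node {
		struct i915_sched *engine;	/* scheduler tracking this request */
		/* priority/deadline bookkeeping elided */
	};

	struct i915_request {
		/* struct intel_engine_cs *__engine;	-- removed */
		struct intel_context *context;
		struct i915_sched_node sched;
		/* ... */
	};

	static inline struct i915_sched *
	i915_request_get_scheduler(const struct i915_request *rq)
	{
		return rq->sched.engine; /* was intel_engine_get_scheduler(rq->__engine) */
	}

Callers that previously took an intel_engine_cs now take the i915_sched
directly (or read rq->sched.engine); see i915_sched_suspend_request(),
__i915_sched_rewind_requests() and friends in the hunks below.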

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/intel_engine.h        |   2 -
 drivers/gpu/drm/i915/gt/intel_engine_cs.c     |  14 +--
 drivers/gpu/drm/i915/gt/intel_engine_types.h  |   3 -
 .../drm/i915/gt/intel_execlists_submission.c  |  32 +++--
 .../gpu/drm/i915/gt/intel_ring_scheduler.c    |   2 +-
 drivers/gpu/drm/i915/gt/mock_engine.c         |  13 +-
 drivers/gpu/drm/i915/gt/selftest_execlists.c  |   4 +-
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c |   2 +-
 drivers/gpu/drm/i915/i915_request.c           |  39 +++---
 drivers/gpu/drm/i915/i915_request.h           |   5 +-
 drivers/gpu/drm/i915/i915_scheduler.c         | 113 ++++++++----------
 drivers/gpu/drm/i915/i915_scheduler.h         |  15 ++-
 drivers/gpu/drm/i915/i915_scheduler_types.h   |   5 +
 .../gpu/drm/i915/selftests/i915_scheduler.c   |   9 +-
 14 files changed, 119 insertions(+), 139 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 0de02f2fc127..b3127a0fe6fb 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -182,8 +182,6 @@ void intel_engine_cleanup(struct intel_engine_cs *engine);
 int intel_engines_init_mmio(struct intel_gt *gt);
 int intel_engines_init(struct intel_gt *gt);
 
-void intel_engine_free_request_pool(struct intel_engine_cs *engine);
-
 void intel_engines_release(struct intel_gt *gt);
 void intel_engines_free(struct intel_gt *gt);
 
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 39a00ea3c631..79e463fa1661 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -403,6 +403,9 @@ void intel_engines_release(struct intel_gt *gt)
 	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
 		__intel_gt_reset(gt, ALL_ENGINES);
 
+	/* Flush rcu'ed i915_fence_free before removing the engines */
+	rcu_barrier();
+
 	/* Decouple the backend; but keep the layout for late GPU resets */
 	for_each_engine(engine, gt, id) {
 		if (!engine->release)
@@ -418,14 +421,6 @@ void intel_engines_release(struct intel_gt *gt)
 	}
 }
 
-void intel_engine_free_request_pool(struct intel_engine_cs *engine)
-{
-	if (!engine->request_pool)
-		return;
-
-	kmem_cache_free(i915_request_slab_cache(), engine->request_pool);
-}
-
 void intel_engines_free(struct intel_gt *gt)
 {
 	struct intel_engine_cs *engine;
@@ -435,7 +430,6 @@ void intel_engines_free(struct intel_gt *gt)
 	rcu_barrier();
 
 	for_each_engine(engine, gt, id) {
-		intel_engine_free_request_pool(engine);
 		kfree(engine);
 		gt->engine[id] = NULL;
 	}
@@ -744,9 +738,9 @@ static int measure_breadcrumb_dw(struct intel_context *ce)
 	if (!frame)
 		return -ENOMEM;
 
-	frame->rq.__engine = engine;
 	frame->rq.context = ce;
 	rcu_assign_pointer(frame->rq.timeline, ce->timeline);
+	frame->rq.sched.engine = se;
 
 	frame->ring.vaddr = frame->cs;
 	frame->ring.size = sizeof(frame->cs);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 61673cf36b01..4764817302fd 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -296,9 +296,6 @@ struct intel_engine_cs {
 
 	struct i915_sched sched;
 
-	/* keep a request in reserve for a [pm] barrier under oom */
-	struct i915_request *request_pool;
-
 	struct llist_head barrier_tasks;
 
 	struct intel_context *kernel_context; /* pinned */
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 9950e4dc9789..1d41c17dfbc9 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -178,7 +178,7 @@ struct virtual_engine {
 	 * use one of sibling_mask physical engines.
 	 */
 	struct ve_bond {
-		const struct intel_engine_cs *master;
+		const struct i915_sched *master;
 		intel_engine_mask_t sibling_mask;
 	} *bonds;
 	unsigned int num_bonds;
@@ -540,10 +540,10 @@ resubmit_virtual_request(struct i915_request *rq, struct virtual_engine *ve)
 	/* Resubmit the queue in execution order */
 	spin_lock(&se->lock);
 	list_for_each_entry_from(pos, &tl->requests, link) {
-		if (pos->__engine == &ve->base)
+		if (pos->sched.engine == se)
 			break;
 
-		__i915_request_requeue(pos, &ve->base);
+		__i915_request_requeue(pos, se);
 	}
 	spin_unlock(&se->lock);
 
@@ -1244,7 +1244,7 @@ static void virtual_requeue(struct intel_engine_cs *engine,
 			     yesno(engine != ve->siblings[0]));
 
 		GEM_BUG_ON(!(rq->execution_mask & engine->mask));
-		if (__i915_request_requeue(rq, engine)) {
+		if (__i915_request_requeue(rq, &engine->sched)) {
 			/*
 			 * Only after we confirm that we will submit
 			 * this request (i.e. it has not already
@@ -1264,7 +1264,6 @@ static void virtual_requeue(struct intel_engine_cs *engine,
 			if (!ve->context.inflight)
 				WRITE_ONCE(ve->context.inflight, engine);
 
-			GEM_BUG_ON(rq->__engine != engine);
 			GEM_BUG_ON(ve->siblings[0] != engine);
 			GEM_BUG_ON(intel_context_inflight(rq->context) != engine);
 
@@ -1413,7 +1412,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		 * the preemption, some of the unwound requests may
 		 * complete!
 		 */
-		last = __i915_sched_rewind_requests(engine);
+		last = __i915_sched_rewind_requests(se);
 
 		/*
 		 * We want to move the interrupted request to the back of
@@ -1423,7 +1422,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		 * be run after it again.
 		 */
 		if (last && defer)
-			__i915_sched_defer_request(engine, last);
+			__i915_sched_defer_request(se, last);
 
 		last = NULL;
 	}
@@ -1970,7 +1969,7 @@ static void execlists_capture_work(struct work_struct *work)
 	i915_gpu_coredump_put(cap->error);
 
 	/* Return this request and all that depend upon it for signaling */
-	i915_sched_resume_request(cap->rq->__engine, cap->rq);
+	i915_sched_resume_request(cap->rq->sched.engine, cap->rq);
 	i915_request_put(cap->rq);
 
 	kfree(cap);
@@ -2104,7 +2103,7 @@ static void execlists_capture(struct intel_engine_cs *engine)
 	 * simply hold that request accountable for being non-preemptible
 	 * long enough to force the reset.
 	 */
-	if (!i915_sched_suspend_request(engine, cap->rq))
+	if (!i915_sched_suspend_request(&engine->sched, cap->rq))
 		goto err_rq;
 
 	INIT_WORK(&cap->work, execlists_capture_work);
@@ -2708,7 +2707,7 @@ static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
 	/* Push back any incomplete requests for replay after the reset. */
 	rcu_read_lock();
 	spin_lock_irqsave(&se->lock, flags);
-	__i915_sched_rewind_requests(engine);
+	__i915_sched_rewind_requests(se);
 	spin_unlock_irqrestore(&se->lock, flags);
 	rcu_read_unlock();
 }
@@ -3201,8 +3200,6 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
 	lrc_fini(&ve->context);
 	intel_context_fini(&ve->context);
 
-	intel_engine_free_request_pool(&ve->base);
-
 	kfree(ve->bonds);
 	kfree(ve);
 }
@@ -3428,8 +3425,7 @@ static void virtual_submission_tasklet(struct tasklet_struct *t)
 }
 
 static struct ve_bond *
-virtual_find_bond(struct virtual_engine *ve,
-		  const struct intel_engine_cs *master)
+virtual_find_bond(struct virtual_engine *ve, const struct i915_sched *master)
 {
 	int i;
 
@@ -3448,9 +3444,9 @@ virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
 	intel_engine_mask_t allowed, exec;
 	struct ve_bond *bond;
 
-	allowed = ~to_request(signal)->__engine->mask;
+	allowed = ~to_request(signal)->sched.engine->mask;
 
-	bond = virtual_find_bond(ve, to_request(signal)->__engine);
+	bond = virtual_find_bond(ve, to_request(signal)->sched.engine);
 	if (bond)
 		allowed &= bond->sibling_mask;
 
@@ -3632,7 +3628,7 @@ int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
 	if (n == ve->num_siblings)
 		return -EINVAL;
 
-	bond = virtual_find_bond(ve, master);
+	bond = virtual_find_bond(ve, &master->sched);
 	if (bond) {
 		bond->sibling_mask |= sibling->mask;
 		return 0;
@@ -3644,7 +3640,7 @@ int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
 	if (!bond)
 		return -ENOMEM;
 
-	bond[ve->num_bonds].master = master;
+	bond[ve->num_bonds].master = &master->sched;
 	bond[ve->num_bonds].sibling_mask = sibling->mask;
 
 	ve->bonds = bond;
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c b/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
index a5bc5f0864fd..1cb4af93981e 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
@@ -684,7 +684,7 @@ static void __ring_rewind(struct intel_engine_cs *engine, bool stalled)
 
 	rcu_read_lock();
 	spin_lock_irqsave(&se->lock, flags);
-	rq = __i915_sched_rewind_requests(engine);
+	rq = __i915_sched_rewind_requests(se);
 	spin_unlock_irqrestore(&se->lock, flags);
 	if (rq && __i915_request_has_started(rq))
 		__i915_request_reset(rq, stalled);
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index f10778bedd83..9c2cdd8e18ce 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -69,13 +69,14 @@ static struct i915_request *first_request(struct mock_engine *engine)
 					mock.link);
 }
 
-static void advance(struct i915_request *request)
+static void advance(struct i915_request *request,
+		    struct mock_engine *engine)
 {
 	list_del_init(&request->mock.link);
 	i915_request_mark_complete(request);
 	GEM_BUG_ON(!i915_request_completed(request));
 
-	intel_engine_signal_breadcrumbs(i915_request_get_engine(request));
+	intel_engine_signal_breadcrumbs(&engine->base);
 }
 
 static void hw_delay_complete(struct timer_list *t)
@@ -89,7 +90,7 @@ static void hw_delay_complete(struct timer_list *t)
 	/* Timer fired, first request is complete */
 	request = first_request(engine);
 	if (request)
-		advance(request);
+		advance(request, engine);
 
 	/*
 	 * Also immediately signal any subsequent 0-delay requests, but
@@ -102,7 +103,7 @@ static void hw_delay_complete(struct timer_list *t)
 			break;
 		}
 
-		advance(request);
+		advance(request, engine);
 	}
 
 	spin_unlock_irqrestore(&engine->hw_lock, flags);
@@ -219,7 +220,7 @@ static void mock_submit_request(struct i915_request *request)
 			mod_timer(&engine->hw_delay,
 				  jiffies + request->mock.delay);
 		else
-			advance(request);
+			advance(request, engine);
 	}
 	spin_unlock_irqrestore(&engine->hw_lock, flags);
 }
@@ -375,7 +376,7 @@ void mock_engine_flush(struct intel_engine_cs *engine)
 
 	spin_lock_irq(&mock->hw_lock);
 	list_for_each_entry_safe(request, rn, &mock->hw_queue, mock.link)
-		advance(request);
+		advance(request, mock);
 	spin_unlock_irq(&mock->hw_lock);
 }
 
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index df38298d463b..eec53d5ad8ce 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -609,7 +609,7 @@ static int live_hold_reset(void *arg)
 		GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
 
 		i915_request_get(rq);
-		i915_sched_suspend_request(engine, rq);
+		i915_sched_suspend_request(&engine->sched, rq);
 		GEM_BUG_ON(!i915_request_on_hold(rq));
 
 		__intel_engine_reset_bh(engine, NULL);
@@ -631,7 +631,7 @@ static int live_hold_reset(void *arg)
 		GEM_BUG_ON(!i915_request_on_hold(rq));
 
 		/* But is resubmitted on release */
-		i915_sched_resume_request(engine, rq);
+		i915_sched_resume_request(&engine->sched, rq);
 		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
 			pr_err("%s: held request did not complete!\n",
 			       engine->name);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 9b82f2793e9e..aba9e4ea6375 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -312,7 +312,7 @@ static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled)
 	spin_lock_irqsave(&se->lock, flags);
 
 	/* Push back any incomplete requests for replay after the reset. */
-	rq = __i915_sched_rewind_requests(engine);
+	rq = __i915_sched_rewind_requests(&engine->sched);
 	if (!rq)
 		goto out_unlock;
 
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 5ebf0e5166f8..80a3214d4ecf 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -58,7 +58,7 @@ static struct i915_global_request {
 
 static const char *i915_fence_get_driver_name(struct dma_fence *fence)
 {
-	return dev_name(to_request(fence)->__engine->i915->drm.dev);
+	return dev_name(to_request(fence)->sched.engine->dbg.dev);
 }
 
 static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
@@ -154,7 +154,7 @@ static void i915_fence_release(struct dma_fence *fence)
 	 * can be a physical engine with the exact corresponding mask.
 	 */
 	if (is_power_of_2(rq->execution_mask) &&
-	    !cmpxchg(&rq->__engine->request_pool, NULL, rq))
+	    !cmpxchg(&rq->sched.engine->request_pool, NULL, rq))
 		return;
 
 	kmem_cache_free(global.slab_requests, rq);
@@ -246,7 +246,7 @@ static void __i915_request_fill(struct i915_request *rq, u8 val)
 
 static void remove_from_engine(struct i915_request *rq)
 {
-	struct intel_engine_cs *engine, *locked;
+	struct i915_sched *engine, *locked;
 
 	/*
 	 * Virtual engines complicate acquiring the engine timeline lock,
@@ -254,11 +254,11 @@ static void remove_from_engine(struct i915_request *rq)
 	 * engine lock. The simple ploy we use is to take the lock then
 	 * check that the rq still belongs to the newly locked engine.
 	 */
-	locked = READ_ONCE(rq->__engine);
-	spin_lock_irq(&locked->sched.lock);
-	while (unlikely(locked != (engine = READ_ONCE(rq->__engine)))) {
-		spin_unlock(&locked->sched.lock);
-		spin_lock(&engine->sched.lock);
+	locked = READ_ONCE(rq->sched.engine);
+	spin_lock_irq(&locked->lock);
+	while (unlikely(locked != (engine = READ_ONCE(rq->sched.engine)))) {
+		spin_unlock(&locked->lock);
+		spin_lock(&engine->lock);
 		locked = engine;
 	}
 	list_del_init(&rq->sched.link);
@@ -269,7 +269,7 @@ static void remove_from_engine(struct i915_request *rq)
 	/* Prevent further __await_execution() registering a cb, then flush */
 	set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
 
-	spin_unlock_irq(&locked->sched.lock);
+	spin_unlock_irq(&locked->lock);
 
 	__notify_execute_cb_imm(rq);
 }
@@ -464,7 +464,7 @@ struct i915_request *i915_request_mark_eio(struct i915_request *rq)
 bool __i915_request_submit(struct i915_request *request,
 			   struct intel_engine_cs *engine)
 {
-	struct i915_sched *se = intel_engine_get_scheduler(engine);
+	struct i915_sched *se = request->sched.engine;
 	bool result = false;
 
 	RQ_TRACE(request, "dl %llu\n", request->sched.deadline);
@@ -731,6 +731,11 @@ static void __i915_request_ctor(void *arg)
 	init_llist_head(&rq->execute_cb);
 }
 
+static struct i915_request **request_pool(struct intel_context *ce)
+{
+	return &intel_engine_get_scheduler(ce->engine)->request_pool;
+}
+
 struct i915_request *
 __i915_request_create(struct intel_context *ce, gfp_t gfp)
 {
@@ -776,7 +781,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 	rq = kmem_cache_alloc(global.slab_requests,
 			      gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
 	if (unlikely(!rq)) {
-		rq = request_alloc_slow(tl, &ce->engine->request_pool, gfp);
+		rq = request_alloc_slow(tl, request_pool(ce), gfp);
 		if (!rq) {
 			ret = -ENOMEM;
 			goto err_unreserve;
@@ -784,9 +789,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 	}
 
 	rq->context = ce;
-	rq->__engine = ce->engine;
 	rq->ring = ce->ring;
-	rq->execution_mask = ce->engine->mask;
 
 	kref_init(&rq->fence.refcount);
 	rq->fence.flags = 0;
@@ -812,6 +815,8 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 	i915_sw_fence_reinit(&i915_request_get(rq)->semaphore);
 
 	i915_sched_node_reinit(&rq->sched);
+	rq->sched.engine = intel_engine_get_scheduler(ce->engine);
+	rq->execution_mask = rq->sched.engine->mask;
 
 	/* No zalloc, everything must be cleared after use */
 	rq->batch = NULL;
@@ -1037,7 +1042,7 @@ emit_semaphore_wait(struct i915_request *to,
 		    struct i915_request *from,
 		    gfp_t gfp)
 {
-	const intel_engine_mask_t mask = READ_ONCE(from->__engine)->mask;
+	const intel_engine_mask_t mask = READ_ONCE(from->sched.engine)->mask;
 	struct i915_sw_fence *wait = &to->submit;
 
 	if (!intel_context_use_semaphores(to->context))
@@ -1072,7 +1077,7 @@ emit_semaphore_wait(struct i915_request *to,
 	if (__emit_semaphore_wait(to, from, from->fence.seqno))
 		goto await_fence;
 
-	to->sched.semaphores |= mask & ~to->__engine->mask;
+	to->sched.semaphores |= mask & ~to->sched.engine->mask;
 	wait = &to->semaphore;
 
 await_fence:
@@ -1276,7 +1281,7 @@ await_request_submit(struct i915_request *to, struct i915_request *from)
 	 * the waiter to be submitted immediately to the physical engine
 	 * as it may then bypass the virtual request.
 	 */
-	if (to->__engine == READ_ONCE(from->__engine))
+	if (to->sched.engine == READ_ONCE(from->sched.engine))
 		return i915_sw_fence_await_sw_fence_gfp(&to->submit,
 							&from->submit,
 							I915_FENCE_GFP);
@@ -1444,7 +1449,7 @@ static bool in_order_submission(const struct i915_request *prev,
 	if (likely(prev->context == rq->context))
 		return true;
 
-	return is_power_of_2(READ_ONCE(prev->__engine)->mask | rq->__engine->mask);
+	return is_power_of_2(READ_ONCE(prev->sched.engine)->mask | rq->sched.engine->mask);
 }
 
 static struct i915_request *
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 9c6ea5fa7b13..a3d294f2d068 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -56,7 +56,7 @@ struct i915_capture_list {
 
 #define RQ_TRACE(rq, fmt, ...) do {					\
 	const struct i915_request *rq__ = (rq);				\
-	ENGINE_TRACE(i915_request_get_engine(rq__),			\
+	SCHED_TRACE(rq__->sched.engine,					\
 		     "fence " RQ_FMT ", current %d " fmt,		\
 		     RQ_ARG(rq__), hwsp_seqno(rq__), ##__VA_ARGS__);	\
 } while (0)
@@ -176,7 +176,6 @@ struct i915_request {
 	 * i915_request_free() will then decrement the refcount on the
 	 * context.
 	 */
-	struct intel_engine_cs *__engine;
 	struct intel_context *context;
 	struct intel_ring *ring;
 	struct intel_timeline __rcu *timeline;
@@ -597,7 +596,7 @@ static inline void i915_request_clear_hold(struct i915_request *rq)
 static inline struct i915_sched *
 i915_request_get_scheduler(const struct i915_request *rq)
 {
-	return intel_engine_get_scheduler(rq->__engine);
+	return rq->sched.engine;
 }
 
 static inline struct intel_engine_cs *
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 75dab453bfbc..64fb5205913a 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -25,22 +25,22 @@ static struct i915_global_scheduler {
 
 /*
  * Virtual engines complicate acquiring the engine timeline lock,
- * as their rq->engine pointer is not stable until under that
+ * as their rq->sched.engine pointer is not stable until under that
  * engine lock. The simple ploy we use is to take the lock then
  * check that the rq still belongs to the newly locked engine.
  */
-#define lock_engine_irqsave(rq, flags) ({ \
+#define lock_sched_irqsave(rq, flags) ({ \
 	struct i915_request * const rq__ = (rq); \
-	struct intel_engine_cs *engine__ = READ_ONCE(rq__->__engine); \
+	struct i915_sched *se__ = READ_ONCE(rq__->sched.engine); \
 \
-	spin_lock_irqsave(&engine__->sched.lock, (flags)); \
-	while (engine__ != READ_ONCE((rq__)->__engine)) { \
-		spin_unlock(&engine__->sched.lock); \
-		engine__ = READ_ONCE(rq__->__engine); \
-		spin_lock(&engine__->sched.lock); \
+	spin_lock_irqsave(&se__->lock, (flags)); \
+	while (se__ != READ_ONCE((rq__)->sched.engine)) { \
+		spin_unlock(&se__->lock); \
+		se__ = READ_ONCE(rq__->sched.engine); \
+		spin_lock(&se__->lock); \
 	} \
 \
-	engine__; \
+	se__; \
 })
 
 static struct i915_sched_node *node_get(struct i915_sched_node *node)
@@ -307,6 +307,10 @@ void i915_sched_park(struct i915_sched *se)
 
 	GEM_BUG_ON(!i915_sched_is_idle(se));
 	se->no_priolist = false;
+
+	if (se->request_pool)
+		kmem_cache_free(i915_request_slab_cache(),
+				fetch_and_zero(&se->request_pool));
 }
 
 void i915_sched_fini(struct i915_sched *se)
@@ -598,8 +602,7 @@ static bool is_first_priolist(const struct i915_sched *se,
 
 static bool __i915_request_set_deadline(struct i915_request *rq, u64 deadline)
 {
-	struct intel_engine_cs *engine = rq->__engine;
-	struct i915_sched *se = intel_engine_get_scheduler(engine);
+	struct i915_sched *se = rq->sched.engine;
 	struct list_head *pos = &rq->sched.signalers_list;
 	struct list_head *plist;
 
@@ -626,7 +629,7 @@ static bool __i915_request_set_deadline(struct i915_request *rq, u64 deadline)
 				if (__i915_request_is_complete(s))
 					continue;
 
-				if (s->__engine != engine) {
+				if (s->sched.engine != se) {
 					ipi_deadline(s, deadline);
 					continue;
 				}
@@ -648,7 +651,6 @@ static bool __i915_request_set_deadline(struct i915_request *rq, u64 deadline)
 		 * any preemption required, be dealt with upon submission.
 		 * See engine->submit_request()
 		 */
-		GEM_BUG_ON(i915_request_get_engine(rq) != engine);
 		if (i915_request_in_priority_queue(rq))
 			remove_from_priolist(se, rq, plist, true);
 	} while ((rq = stack_pop(rq, &pos)));
@@ -658,14 +660,14 @@ static bool __i915_request_set_deadline(struct i915_request *rq, u64 deadline)
 
 void i915_request_set_deadline(struct i915_request *rq, u64 deadline)
 {
-	struct intel_engine_cs *engine;
+	struct i915_sched *se;
 	unsigned long flags;
 
 	if (deadline >= rq_deadline(rq))
 		return;
 
-	engine = lock_engine_irqsave(rq, flags);
-	if (!i915_sched_has_deadlines(&engine->sched))
+	se = lock_sched_irqsave(rq, flags);
+	if (!i915_sched_has_deadlines(se))
 		goto unlock;
 
 	if (deadline >= rq_deadline(rq))
@@ -676,12 +678,12 @@ void i915_request_set_deadline(struct i915_request *rq, u64 deadline)
 
 	rcu_read_lock();
 	if (__i915_request_set_deadline(rq, deadline))
-		i915_sched_kick(&engine->sched);
+		i915_sched_kick(se);
 	rcu_read_unlock();
 	GEM_BUG_ON(rq_deadline(rq) != deadline);
 
 unlock:
-	spin_unlock_irqrestore(&engine->sched.lock, flags);
+	spin_unlock_irqrestore(&se->lock, flags);
 }
 
 static u64 prio_slice(int prio)
@@ -857,12 +859,11 @@ static void ipi_priority(struct i915_request *rq, int prio)
 
 static bool __i915_request_set_priority(struct i915_request *rq, int prio)
 {
-	struct intel_engine_cs *engine = rq->__engine;
+	struct i915_sched *se = rq->sched.engine;
 	struct list_head *pos = &rq->sched.signalers_list;
 	bool kick = false;
 
-	SCHED_TRACE(&engine->sched, "PI for " RQ_FMT ", prio:%d\n",
-		    RQ_ARG(rq), prio);
+	SCHED_TRACE(se, "PI for " RQ_FMT ", prio:%d\n", RQ_ARG(rq), prio);
 
 	/*
 	 * Recursively bump all dependent priorities to match the new request.
@@ -886,7 +887,7 @@ static bool __i915_request_set_priority(struct i915_request *rq, int prio)
 	do {
 		struct i915_request *next;
 
-		if (i915_sched_has_priorities(i915_request_get_scheduler(rq))) {
+		if (i915_sched_has_priorities(se)) {
 			list_for_each_continue(pos, &rq->sched.signalers_list) {
 				struct i915_dependency *p =
 					list_entry(pos, typeof(*p), signal_link);
@@ -899,7 +900,7 @@ static bool __i915_request_set_priority(struct i915_request *rq, int prio)
 				if (__i915_request_is_complete(s))
 					continue;
 
-				if (s->__engine != engine) {
+				if (s->sched.engine != se) {
 					ipi_priority(s, prio);
 					continue;
 				}
@@ -923,7 +924,7 @@ static bool __i915_request_set_priority(struct i915_request *rq, int prio)
 		 * any preemption required, be dealt with upon submission.
 		 * See engine->submit_request()
 		 */
-		GEM_BUG_ON(rq->__engine != engine);
+		GEM_BUG_ON(rq->sched.engine != se);
 		if (i915_request_is_ready(rq) &&
 		    set_earliest_deadline(rq, rq_deadline(rq)))
 			kick = true;
@@ -939,7 +940,7 @@ static bool __i915_request_set_priority(struct i915_request *rq, int prio)
 
 void i915_request_set_priority(struct i915_request *rq, int prio)
 {
-	struct intel_engine_cs *engine;
+	struct i915_sched *se;
 	unsigned long flags;
 
 	if (prio <= rq_prio(rq))
@@ -977,7 +978,7 @@ void i915_request_set_priority(struct i915_request *rq, int prio)
 			return;
 	}
 
-	engine = lock_engine_irqsave(rq, flags);
+	se = lock_sched_irqsave(rq, flags);
 	if (prio <= rq_prio(rq))
 		goto unlock;
 
@@ -986,20 +987,19 @@ void i915_request_set_priority(struct i915_request *rq, int prio)
 
 	rcu_read_lock();
 	if (__i915_request_set_priority(rq, prio))
-		i915_sched_kick(&engine->sched);
+		i915_sched_kick(se);
 	rcu_read_unlock();
 	GEM_BUG_ON(rq_prio(rq) != prio);
 
 unlock:
-	spin_unlock_irqrestore(&engine->sched.lock, flags);
+	spin_unlock_irqrestore(&se->lock, flags);
 }
 
-static void __defer_request(struct intel_engine_cs *engine,
+static void __defer_request(struct i915_sched *se,
 			    struct i915_request *rq,
 			    const u64 deadline)
 {
 	struct list_head *pos = &rq->sched.waiters_list;
-	struct i915_sched *se = intel_engine_get_scheduler(engine);
 	struct i915_request *rn;
 	LIST_HEAD(dfs);
 
@@ -1025,7 +1025,7 @@ static void __defer_request(struct intel_engine_cs *engine,
 				continue;
 
 			/* Leave semaphores spinning on the other engines */
-			if (w->__engine != engine)
+			if (w->sched.engine != se)
 				continue;
 
 			/* No waiter should start before its signaler */
@@ -1060,7 +1060,7 @@ static void __defer_request(struct intel_engine_cs *engine,
 		WRITE_ONCE(rq->sched.deadline, deadline);
 
 		/* Note list is reversed for waiters wrt signal hierarchy */
-		GEM_BUG_ON(rq->__engine != engine);
+		GEM_BUG_ON(rq->sched.engine != se);
 		remove_from_priolist(se, rq, &dfs, false);
 
 		/* Track our visit, and prevent duplicate processing */
@@ -1074,10 +1074,9 @@ static void __defer_request(struct intel_engine_cs *engine,
 	}
 }
 
-void __i915_sched_defer_request(struct intel_engine_cs *engine,
-				struct i915_request *rq)
+void __i915_sched_defer_request(struct i915_sched *se, struct i915_request *rq)
 {
-	__defer_request(engine, rq,
+	__defer_request(se, rq,
 			max(rq_deadline(rq),
 			    next_virtual_deadline(adj_prio(rq))));
 }
@@ -1102,7 +1101,7 @@ static bool hold_request(const struct i915_request *rq)
 		const struct i915_request *s =
 			container_of(p->signaler, typeof(*s), sched);
 
-		if (s->__engine != rq->__engine)
+		if (s->sched.engine != rq->sched.engine)
 			continue;
 
 		result = i915_request_on_hold(s);
@@ -1121,21 +1120,18 @@ static bool ancestor_on_hold(const struct i915_sched *se,
 	return unlikely(!list_empty(&se->hold)) && hold_request(rq);
 }
 
-bool __i915_request_requeue(struct i915_request *rq,
-			    struct intel_engine_cs *engine)
+bool __i915_request_requeue(struct i915_request *rq, struct i915_sched *se)
 {
-	struct i915_sched *se = intel_engine_get_scheduler(engine);
-
 	RQ_TRACE(rq, "transfer from %s to %s\n",
 		 i915_request_get_scheduler(rq)->dbg.name, se->dbg.name);
 
 	lockdep_assert_held(&se->lock);
 	lockdep_assert_held(&i915_request_get_scheduler(rq)->lock);
 	GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags));
-	GEM_BUG_ON(rq->__engine == engine);
+	GEM_BUG_ON(rq->sched.engine == se);
 
 	remove_from_priolist(i915_request_get_scheduler(rq), rq, NULL, false);
-	WRITE_ONCE(rq->__engine, engine);
+	WRITE_ONCE(rq->sched.engine, se);
 
 	if (__i915_request_is_complete(rq)) {
 		clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
@@ -1154,7 +1150,7 @@ bool __i915_request_requeue(struct i915_request *rq,
 		/* Maintain request ordering wrt to existing on target */
 		__i915_request_set_deadline(rq, deadline);
 		if (!list_empty(&rq->sched.waiters_list))
-			__defer_request(engine, rq, deadline);
+			__defer_request(se, rq, deadline);
 
 		GEM_BUG_ON(rq_deadline(rq) == I915_DEADLINE_NEVER);
 	}
@@ -1165,7 +1161,7 @@ bool __i915_request_requeue(struct i915_request *rq,
 
 void i915_request_enqueue(struct i915_request *rq)
 {
-	struct i915_sched *se = i915_request_get_scheduler(rq);
+	struct i915_sched *se = rq->sched.engine;
 	u64 dl = earliest_deadline(rq);
 	unsigned long flags;
 	bool kick = false;
@@ -1194,10 +1190,8 @@ void i915_request_enqueue(struct i915_request *rq)
 		i915_sched_kick(se);
 }
 
-struct i915_request *
-__i915_sched_rewind_requests(struct intel_engine_cs *engine)
+struct i915_request *__i915_sched_rewind_requests(struct i915_sched *se)
 {
-	struct i915_sched *se = intel_engine_get_scheduler(engine);
 	struct i915_request *rq, *rn, *active = NULL;
 	u64 deadline = I915_DEADLINE_NEVER;
 	struct list_head *pl;
@@ -1245,14 +1239,13 @@ __i915_sched_rewind_requests(struct intel_engine_cs *engine)
 	return active;
 }
 
-bool __i915_sched_suspend_request(struct intel_engine_cs *engine,
+bool __i915_sched_suspend_request(struct i915_sched *se,
 				  struct i915_request *rq)
 {
-	struct i915_sched *se = intel_engine_get_scheduler(engine);
 	LIST_HEAD(list);
 
 	lockdep_assert_held(&se->lock);
-	GEM_BUG_ON(rq->__engine != engine);
+	GEM_BUG_ON(rq->sched.engine != se);
 
 	if (__i915_request_is_complete(rq)) /* too late! */
 		return false;
@@ -1287,7 +1280,7 @@ bool __i915_sched_suspend_request(struct intel_engine_cs *engine,
 				continue;
 
 			/* Leave semaphores spinning on the other engines */
-			if (w->__engine != engine)
+			if (w->sched.engine != se)
 				continue;
 
 			if (!i915_request_is_ready(w))
@@ -1310,26 +1303,22 @@ bool __i915_sched_suspend_request(struct intel_engine_cs *engine,
 	return true;
 }
 
-bool i915_sched_suspend_request(struct intel_engine_cs *engine,
-				struct i915_request *rq)
+bool i915_sched_suspend_request(struct i915_sched *se, struct i915_request *rq)
 {
-	struct i915_sched *se = intel_engine_get_scheduler(engine);
 	bool result;
 
 	if (i915_request_on_hold(rq))
 		return false;
 
 	spin_lock_irq(&se->lock);
-	result = __i915_sched_suspend_request(engine, rq);
+	result = __i915_sched_suspend_request(se, rq);
 	spin_unlock_irq(&se->lock);
 
 	return result;
 }
 
-void __i915_sched_resume_request(struct intel_engine_cs *engine,
-				 struct i915_request *rq)
+void __i915_sched_resume_request(struct i915_sched *se, struct i915_request *rq)
 {
-	struct i915_sched *se = intel_engine_get_scheduler(engine);
 	LIST_HEAD(list);
 	bool submit = false;
 
@@ -1369,7 +1358,7 @@ void __i915_sched_resume_request(struct intel_engine_cs *engine,
 			if (rq->fence.error)
 				i915_request_set_error_once(w, rq->fence.error);
 
-			if (w->__engine != engine)
+			if (w->sched.engine != se)
 				continue;
 
 			/* We also treat the on-hold status as a visited bit */
@@ -1399,13 +1388,11 @@ void i915_request_update_deadline(struct i915_request *rq)
 	i915_request_set_deadline(rq, earliest_deadline(rq));
 }
 
-void i915_sched_resume_request(struct intel_engine_cs *engine,
+void i915_sched_resume_request(struct i915_sched *se,
 			       struct i915_request *rq)
 {
-	struct i915_sched *se = intel_engine_get_scheduler(engine);
-
 	spin_lock_irq(&se->lock);
-	__i915_sched_resume_request(engine, rq);
+	__i915_sched_resume_request(se, rq);
 	spin_unlock_irq(&se->lock);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 19e1ff25a0a3..8bd3e29bf801 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -15,7 +15,6 @@
 #include "i915_request.h"
 
 struct drm_printer;
-struct intel_engine_cs;
 
 #define SCHED_TRACE(se, fmt, ...) do {					\
 	const struct i915_sched *se__ __maybe_unused = (se);		\
@@ -55,21 +54,21 @@ void i915_request_update_deadline(struct i915_request *request);
 
 void i915_request_enqueue(struct i915_request *request);
 bool __i915_request_requeue(struct i915_request *rq,
-			    struct intel_engine_cs *engine);
+			    struct i915_sched *se);
 
 struct i915_request *
-__i915_sched_rewind_requests(struct intel_engine_cs *engine);
-void __i915_sched_defer_request(struct intel_engine_cs *engine,
+__i915_sched_rewind_requests(struct i915_sched *engine);
+void __i915_sched_defer_request(struct i915_sched *engine,
 				struct i915_request *request);
 
-bool __i915_sched_suspend_request(struct intel_engine_cs *engine,
+bool __i915_sched_suspend_request(struct i915_sched *engine,
 				  struct i915_request *rq);
-void __i915_sched_resume_request(struct intel_engine_cs *engine,
+void __i915_sched_resume_request(struct i915_sched *engine,
 				 struct i915_request *request);
 
-bool i915_sched_suspend_request(struct intel_engine_cs *engine,
+bool i915_sched_suspend_request(struct i915_sched *engine,
 				struct i915_request *request);
-void i915_sched_resume_request(struct intel_engine_cs *engine,
+void i915_sched_resume_request(struct i915_sched *engine,
 			       struct i915_request *rq);
 
 /*
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index 1cae67919045..c0a747e54c0a 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -105,6 +105,9 @@ struct i915_sched {
 	 */
 	bool no_priolist;
 
+	/* keep a request in reserve for a [pm] barrier under oom */
+	struct i915_request *request_pool;
+
 	/* Pretty device names for debug messages */
 	struct {
 		struct device *dev;
@@ -156,6 +159,8 @@ struct i915_sched_attr {
  * others.
  */
 struct i915_sched_node {
+	struct i915_sched *engine;
+
 	spinlock_t lock; /* protect the lists */
 
 	struct list_head signalers_list; /* those before us, we depend upon */
diff --git a/drivers/gpu/drm/i915/selftests/i915_scheduler.c b/drivers/gpu/drm/i915/selftests/i915_scheduler.c
index 0180a21fa1a7..d1ce2e05b6b9 100644
--- a/drivers/gpu/drm/i915/selftests/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/selftests/i915_scheduler.c
@@ -606,14 +606,13 @@ static int igt_deadline_chains(void *arg)
 
 static bool igt_defer(struct i915_request *rq, unsigned long v, unsigned long e)
 {
-	struct intel_engine_cs *engine = rq->__engine;
-	struct i915_sched *se = intel_engine_get_scheduler(engine);
+	struct i915_sched *se = rq->sched.engine;
 
 	/* XXX No generic means to unwind incomplete requests yet */
 	if (!i915_request_in_priority_queue(rq))
 		return false;
 
-	if (!intel_engine_has_preemption(engine))
+	if (!intel_engine_has_preemption(rq->context->engine))
 		return false;
 
 	spin_lock_irq(&se->lock);
@@ -624,10 +623,10 @@ static bool igt_defer(struct i915_request *rq, unsigned long v, unsigned long e)
 
 	/* Then the very first request must be the one everyone depends on */
 	rq = list_first_entry(lookup_priolist(se, 0), typeof(*rq), sched.link);
-	GEM_BUG_ON(rq->__engine != engine);
+	GEM_BUG_ON(rq->sched.engine != se);
 
 	/* Deferring the first request will then have to defer all requests */
-	__i915_sched_defer_request(engine, rq);
+	__i915_sched_defer_request(se, rq);
 
 	spin_unlock_irq(&se->lock);
 	return true;
-- 
2.20.1
