[PATCH 41/48] drm/i915: Replace i915_request.engine with i915_request.sched.engine

Chris Wilson <chris@chris-wilson.co.uk>
Tue Feb 9 01:21:44 UTC 2021


Now that the primary scheduling control is separated from the CS engine,
we want to track the scheduler itself on the request (a request being the
scheduler's unit of work). We have very little use for the physical engine
outside of request submission, so demote its backpointer and track the
i915_sched on the request instead. As a side effect, the oom-emergency
request_pool moves from the engine onto the scheduler, where it is freed
when the scheduler parks.
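
For illustration, the shape of the conversion at a typical call site
(a compressed sketch of the remove_from_engine() hunk below, eliding the
virtual-engine relock dance; not new code):

    /* before: chase the physical engine to reach its scheduler */
    struct intel_engine_cs *engine = READ_ONCE(rq->__engine);
    spin_lock_irq(&engine->sched.lock);

    /* after: the request tracks the scheduler directly */
    struct i915_sched *se = READ_ONCE(rq->sched.engine);
    spin_lock_irq(&se->lock);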

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/intel_engine.h        |   2 -
 drivers/gpu/drm/i915/gt/intel_engine_cs.c     |  14 +--
 drivers/gpu/drm/i915/gt/intel_engine_types.h  |   3 -
 .../drm/i915/gt/intel_execlists_submission.c  |  32 +++---
 .../gpu/drm/i915/gt/intel_ring_scheduler.c    |   2 +-
 drivers/gpu/drm/i915/gt/mock_engine.c         |  13 +--
 drivers/gpu/drm/i915/gt/selftest_execlists.c  |   4 +-
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c |   2 +-
 drivers/gpu/drm/i915/i915_request.c           |  39 ++++---
 drivers/gpu/drm/i915/i915_request.h           |   5 +-
 drivers/gpu/drm/i915/i915_scheduler.c         | 100 ++++++++----------
 drivers/gpu/drm/i915/i915_scheduler.h         |  15 ++-
 drivers/gpu/drm/i915/i915_scheduler_types.h   |   5 +
 .../gpu/drm/i915/selftests/i915_scheduler.c   |   9 +-
 14 files changed, 113 insertions(+), 132 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 915511714ac5..734f43d5d3dd 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -182,8 +182,6 @@ void intel_engine_cleanup(struct intel_engine_cs *engine);
 int intel_engines_init_mmio(struct intel_gt *gt);
 int intel_engines_init(struct intel_gt *gt);
 
-void intel_engine_free_request_pool(struct intel_engine_cs *engine);
-
 void intel_engines_release(struct intel_gt *gt);
 void intel_engines_free(struct intel_gt *gt);
 
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index b5c01e9b16b8..79985304c73e 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -403,6 +403,9 @@ void intel_engines_release(struct intel_gt *gt)
 	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
 		__intel_gt_reset(gt, ALL_ENGINES);
 
+	/* Flush rcu'ed i915_fence_free before removing the engines */
+	rcu_barrier();
+
 	/* Decouple the backend; but keep the layout for late GPU resets */
 	for_each_engine(engine, gt, id) {
 		if (!engine->release)
@@ -418,14 +421,6 @@ void intel_engines_release(struct intel_gt *gt)
 	}
 }
 
-void intel_engine_free_request_pool(struct intel_engine_cs *engine)
-{
-	if (!engine->request_pool)
-		return;
-
-	kmem_cache_free(i915_request_slab_cache(), engine->request_pool);
-}
-
 void intel_engines_free(struct intel_gt *gt)
 {
 	struct intel_engine_cs *engine;
@@ -435,7 +430,6 @@ void intel_engines_free(struct intel_gt *gt)
 	rcu_barrier();
 
 	for_each_engine(engine, gt, id) {
-		intel_engine_free_request_pool(engine);
 		kfree(engine);
 		gt->engine[id] = NULL;
 	}
@@ -744,9 +738,9 @@ static int measure_breadcrumb_dw(struct intel_context *ce)
 	if (!frame)
 		return -ENOMEM;
 
-	frame->rq.__engine = engine;
 	frame->rq.context = ce;
 	rcu_assign_pointer(frame->rq.timeline, ce->timeline);
+	frame->rq.sched.engine = se;
 
 	frame->ring.vaddr = frame->cs;
 	frame->ring.size = sizeof(frame->cs);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index b2865d4cd76c..5b4eed376f81 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -296,9 +296,6 @@ struct intel_engine_cs {
 
 	struct i915_sched sched;
 
-	/* keep a request in reserve for a [pm] barrier under oom */
-	struct i915_request *request_pool;
-
 	struct llist_head barrier_tasks;
 
 	struct intel_context *kernel_context; /* pinned */
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 26b77dc51ae8..87b9d7c5a5d7 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -179,7 +179,7 @@ struct virtual_engine {
 	 * use one of sibling_mask physical engines.
 	 */
 	struct ve_bond {
-		const struct intel_engine_cs *master;
+		const struct i915_sched *master;
 		intel_engine_mask_t sibling_mask;
 	} *bonds;
 	unsigned int num_bonds;
@@ -541,10 +541,10 @@ resubmit_virtual_request(struct i915_request *rq, struct virtual_engine *ve)
 	/* Resubmit the queue in execution order */
 	spin_lock(&se->lock);
 	list_for_each_entry_from(pos, &tl->requests, link) {
-		if (pos->__engine == &ve->base)
+		if (pos->sched.engine == se)
 			break;
 
-		__i915_request_requeue(pos, &ve->base);
+		__i915_request_requeue(pos, se);
 	}
 	spin_unlock(&se->lock);
 
@@ -1245,7 +1245,7 @@ static void virtual_requeue(struct intel_engine_cs *engine,
 			     yesno(engine != ve->siblings[0]));
 
 		GEM_BUG_ON(!(rq->execution_mask & engine->mask));
-		if (__i915_request_requeue(rq, engine)) {
+		if (__i915_request_requeue(rq, &engine->sched)) {
 			/*
 			 * Only after we confirm that we will submit
 			 * this request (i.e. it has not already
@@ -1265,7 +1265,6 @@ static void virtual_requeue(struct intel_engine_cs *engine,
 			if (!ve->context.inflight)
 				WRITE_ONCE(ve->context.inflight, engine);
 
-			GEM_BUG_ON(rq->__engine != engine);
 			GEM_BUG_ON(ve->siblings[0] != engine);
 			GEM_BUG_ON(intel_context_inflight(rq->context) != engine);
 
@@ -1415,7 +1414,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		 * the preemption, some of the unwound requests may
 		 * complete!
 		 */
-		last = __i915_sched_rewind_requests(engine);
+		last = __i915_sched_rewind_requests(se);
 
 		/*
 		 * We want to move the interrupted request to the back of
@@ -1425,7 +1424,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		 * be run after it again.
 		 */
 		if (last && defer)
-			__i915_sched_defer_request(engine, last);
+			__i915_sched_defer_request(se, last);
 
 		last = NULL;
 	}
@@ -1965,7 +1964,7 @@ static void execlists_capture_work(struct work_struct *work)
 	i915_gpu_coredump_put(cap->error);
 
 	/* Return this request and all that depend upon it for signaling */
-	i915_sched_resume_request(cap->rq->__engine, cap->rq);
+	i915_sched_resume_request(cap->rq->sched.engine, cap->rq);
 	i915_request_put(cap->rq);
 
 	kfree(cap);
@@ -2099,7 +2098,7 @@ static void execlists_capture(struct intel_engine_cs *engine)
 	 * simply hold that request accountable for being non-preemptible
 	 * long enough to force the reset.
 	 */
-	if (!i915_sched_suspend_request(engine, cap->rq))
+	if (!i915_sched_suspend_request(&engine->sched, cap->rq))
 		goto err_rq;
 
 	INIT_WORK(&cap->work, execlists_capture_work);
@@ -2703,7 +2702,7 @@ static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
 	/* Push back any incomplete requests for replay after the reset. */
 	rcu_read_lock();
 	spin_lock_irqsave(&se->lock, flags);
-	__i915_sched_rewind_requests(engine);
+	__i915_sched_rewind_requests(se);
 	spin_unlock_irqrestore(&se->lock, flags);
 	rcu_read_unlock();
 }
@@ -3094,8 +3093,6 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
 	lrc_fini(&ve->context);
 	intel_context_fini(&ve->context);
 
-	intel_engine_free_request_pool(&ve->base);
-
 	kfree(ve->bonds);
 	kfree(ve);
 }
@@ -3321,8 +3318,7 @@ static void virtual_submission_tasklet(struct tasklet_struct *t)
 }
 
 static struct ve_bond *
-virtual_find_bond(struct virtual_engine *ve,
-		  const struct intel_engine_cs *master)
+virtual_find_bond(struct virtual_engine *ve, const struct i915_sched *master)
 {
 	int i;
 
@@ -3341,9 +3337,9 @@ virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
 	intel_engine_mask_t allowed, exec;
 	struct ve_bond *bond;
 
-	allowed = ~to_request(signal)->__engine->mask;
+	allowed = ~to_request(signal)->sched.engine->mask;
 
-	bond = virtual_find_bond(ve, to_request(signal)->__engine);
+	bond = virtual_find_bond(ve, to_request(signal)->sched.engine);
 	if (bond)
 		allowed &= bond->sibling_mask;
 
@@ -3526,7 +3522,7 @@ int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
 	if (n == ve->num_siblings)
 		return -EINVAL;
 
-	bond = virtual_find_bond(ve, master);
+	bond = virtual_find_bond(ve, &master->sched);
 	if (bond) {
 		bond->sibling_mask |= sibling->mask;
 		return 0;
@@ -3538,7 +3534,7 @@ int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
 	if (!bond)
 		return -ENOMEM;
 
-	bond[ve->num_bonds].master = master;
+	bond[ve->num_bonds].master = &master->sched;
 	bond[ve->num_bonds].sibling_mask = sibling->mask;
 
 	ve->bonds = bond;
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c b/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
index de839ee71b97..95185d0d4c69 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
@@ -669,7 +669,7 @@ static void __ring_rewind(struct intel_engine_cs *engine, bool stalled)
 
 	rcu_read_lock();
 	spin_lock_irqsave(&se->lock, flags);
-	rq = __i915_sched_rewind_requests(engine);
+	rq = __i915_sched_rewind_requests(se);
 	spin_unlock_irqrestore(&se->lock, flags);
 	if (rq && __i915_request_has_started(rq))
 		__i915_request_reset(rq, stalled);
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 36725b7bc391..58d829865735 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -69,13 +69,14 @@ static struct i915_request *first_request(struct mock_engine *engine)
 					mock.link);
 }
 
-static void advance(struct i915_request *request)
+static void advance(struct i915_request *request,
+		    struct mock_engine *engine)
 {
 	list_del_init(&request->mock.link);
 	i915_request_mark_complete(request);
 	GEM_BUG_ON(!i915_request_completed(request));
 
-	intel_engine_signal_breadcrumbs(i915_request_get_engine(request));
+	intel_engine_signal_breadcrumbs(&engine->base);
 }
 
 static void hw_delay_complete(struct timer_list *t)
@@ -89,7 +90,7 @@ static void hw_delay_complete(struct timer_list *t)
 	/* Timer fired, first request is complete */
 	request = first_request(engine);
 	if (request)
-		advance(request);
+		advance(request, engine);
 
 	/*
 	 * Also immediately signal any subsequent 0-delay requests, but
@@ -102,7 +103,7 @@ static void hw_delay_complete(struct timer_list *t)
 			break;
 		}
 
-		advance(request);
+		advance(request, engine);
 	}
 
 	spin_unlock_irqrestore(&engine->hw_lock, flags);
@@ -219,7 +220,7 @@ static void mock_submit_request(struct i915_request *request)
 			mod_timer(&engine->hw_delay,
 				  jiffies + request->mock.delay);
 		else
-			advance(request);
+			advance(request, engine);
 	}
 	spin_unlock_irqrestore(&engine->hw_lock, flags);
 }
@@ -374,7 +375,7 @@ void mock_engine_flush(struct intel_engine_cs *engine)
 
 	spin_lock_irq(&mock->hw_lock);
 	list_for_each_entry_safe(request, rn, &mock->hw_queue, mock.link)
-		advance(request);
+		advance(request, mock);
 	spin_unlock_irq(&mock->hw_lock);
 }
 
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index c98ca97f851a..bdec2c02410d 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -609,7 +609,7 @@ static int live_hold_reset(void *arg)
 		GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
 
 		i915_request_get(rq);
-		i915_sched_suspend_request(engine, rq);
+		i915_sched_suspend_request(&engine->sched, rq);
 		GEM_BUG_ON(!i915_request_on_hold(rq));
 
 		__intel_engine_reset_bh(engine, NULL);
@@ -631,7 +631,7 @@ static int live_hold_reset(void *arg)
 		GEM_BUG_ON(!i915_request_on_hold(rq));
 
 		/* But is resubmitted on release */
-		i915_sched_resume_request(engine, rq);
+		i915_sched_resume_request(&engine->sched, rq);
 		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
 			pr_err("%s: held request did not complete!\n",
 			       engine->name);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 164e4c2384b3..881c6d62cf47 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -308,7 +308,7 @@ static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled)
 	spin_lock_irqsave(&se->lock, flags);
 
 	/* Push back any incomplete requests for replay after the reset. */
-	rq = __i915_sched_rewind_requests(engine);
+	rq = __i915_sched_rewind_requests(&engine->sched);
 	if (!rq)
 		goto out_unlock;
 
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 5ebf0e5166f8..80a3214d4ecf 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -58,7 +58,7 @@ static struct i915_global_request {
 
 static const char *i915_fence_get_driver_name(struct dma_fence *fence)
 {
-	return dev_name(to_request(fence)->__engine->i915->drm.dev);
+	return dev_name(to_request(fence)->sched.engine->dbg.dev);
 }
 
 static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
@@ -154,7 +154,7 @@ static void i915_fence_release(struct dma_fence *fence)
 	 * can be a physical engine with the exact corresponding mask.
 	 */
 	if (is_power_of_2(rq->execution_mask) &&
-	    !cmpxchg(&rq->__engine->request_pool, NULL, rq))
+	    !cmpxchg(&rq->sched.engine->request_pool, NULL, rq))
 		return;
 
 	kmem_cache_free(global.slab_requests, rq);
@@ -246,7 +246,7 @@ static void __i915_request_fill(struct i915_request *rq, u8 val)
 
 static void remove_from_engine(struct i915_request *rq)
 {
-	struct intel_engine_cs *engine, *locked;
+	struct i915_sched *engine, *locked;
 
 	/*
 	 * Virtual engines complicate acquiring the engine timeline lock,
@@ -254,11 +254,11 @@ static void remove_from_engine(struct i915_request *rq)
 	 * engine lock. The simple ploy we use is to take the lock then
 	 * check that the rq still belongs to the newly locked engine.
 	 */
-	locked = READ_ONCE(rq->__engine);
-	spin_lock_irq(&locked->sched.lock);
-	while (unlikely(locked != (engine = READ_ONCE(rq->__engine)))) {
-		spin_unlock(&locked->sched.lock);
-		spin_lock(&engine->sched.lock);
+	locked = READ_ONCE(rq->sched.engine);
+	spin_lock_irq(&locked->lock);
+	while (unlikely(locked != (engine = READ_ONCE(rq->sched.engine)))) {
+		spin_unlock(&locked->lock);
+		spin_lock(&engine->lock);
 		locked = engine;
 	}
 	list_del_init(&rq->sched.link);
@@ -269,7 +269,7 @@ static void remove_from_engine(struct i915_request *rq)
 	/* Prevent further __await_execution() registering a cb, then flush */
 	set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
 
-	spin_unlock_irq(&locked->sched.lock);
+	spin_unlock_irq(&locked->lock);
 
 	__notify_execute_cb_imm(rq);
 }
@@ -464,7 +464,7 @@ struct i915_request *i915_request_mark_eio(struct i915_request *rq)
 bool __i915_request_submit(struct i915_request *request,
 			   struct intel_engine_cs *engine)
 {
-	struct i915_sched *se = intel_engine_get_scheduler(engine);
+	struct i915_sched *se = request->sched.engine;
 	bool result = false;
 
 	RQ_TRACE(request, "dl %llu\n", request->sched.deadline);
@@ -731,6 +731,11 @@ static void __i915_request_ctor(void *arg)
 	init_llist_head(&rq->execute_cb);
 }
 
+static struct i915_request **request_pool(struct intel_context *ce)
+{
+	return &intel_engine_get_scheduler(ce->engine)->request_pool;
+}
+
 struct i915_request *
 __i915_request_create(struct intel_context *ce, gfp_t gfp)
 {
@@ -776,7 +781,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 	rq = kmem_cache_alloc(global.slab_requests,
 			      gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
 	if (unlikely(!rq)) {
-		rq = request_alloc_slow(tl, &ce->engine->request_pool, gfp);
+		rq = request_alloc_slow(tl, request_pool(ce), gfp);
 		if (!rq) {
 			ret = -ENOMEM;
 			goto err_unreserve;
@@ -784,9 +789,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 	}
 
 	rq->context = ce;
-	rq->__engine = ce->engine;
 	rq->ring = ce->ring;
-	rq->execution_mask = ce->engine->mask;
 
 	kref_init(&rq->fence.refcount);
 	rq->fence.flags = 0;
@@ -812,6 +815,8 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 	i915_sw_fence_reinit(&i915_request_get(rq)->semaphore);
 
 	i915_sched_node_reinit(&rq->sched);
+	rq->sched.engine = intel_engine_get_scheduler(ce->engine);
+	rq->execution_mask = rq->sched.engine->mask;
 
 	/* No zalloc, everything must be cleared after use */
 	rq->batch = NULL;
@@ -1037,7 +1042,7 @@ emit_semaphore_wait(struct i915_request *to,
 		    struct i915_request *from,
 		    gfp_t gfp)
 {
-	const intel_engine_mask_t mask = READ_ONCE(from->__engine)->mask;
+	const intel_engine_mask_t mask = READ_ONCE(from->sched.engine)->mask;
 	struct i915_sw_fence *wait = &to->submit;
 
 	if (!intel_context_use_semaphores(to->context))
@@ -1072,7 +1077,7 @@ emit_semaphore_wait(struct i915_request *to,
 	if (__emit_semaphore_wait(to, from, from->fence.seqno))
 		goto await_fence;
 
-	to->sched.semaphores |= mask & ~to->__engine->mask;
+	to->sched.semaphores |= mask & ~to->sched.engine->mask;
 	wait = &to->semaphore;
 
 await_fence:
@@ -1276,7 +1281,7 @@ await_request_submit(struct i915_request *to, struct i915_request *from)
 	 * the waiter to be submitted immediately to the physical engine
 	 * as it may then bypass the virtual request.
 	 */
-	if (to->__engine == READ_ONCE(from->__engine))
+	if (to->sched.engine == READ_ONCE(from->sched.engine))
 		return i915_sw_fence_await_sw_fence_gfp(&to->submit,
 							&from->submit,
 							I915_FENCE_GFP);
@@ -1444,7 +1449,7 @@ static bool in_order_submission(const struct i915_request *prev,
 	if (likely(prev->context == rq->context))
 		return true;
 
-	return is_power_of_2(READ_ONCE(prev->__engine)->mask | rq->__engine->mask);
+	return is_power_of_2(READ_ONCE(prev->sched.engine)->mask | rq->sched.engine->mask);
 }
 
 static struct i915_request *
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 94446ae721f9..136426237d83 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -56,7 +56,7 @@ struct i915_capture_list {
 
 #define RQ_TRACE(rq, fmt, ...) do {					\
 	const struct i915_request *rq__ = (rq);				\
-	ENGINE_TRACE(i915_request_get_engine(rq__),			\
+	SCHED_TRACE(rq__->sched.engine,					\
 		     "fence " RQ_FMT ", current %d " fmt,		\
 		     RQ_ARG(rq__), hwsp_seqno(rq__), ##__VA_ARGS__);	\
 } while (0)
@@ -176,7 +176,6 @@ struct i915_request {
 	 * i915_request_free() will then decrement the refcount on the
 	 * context.
 	 */
-	struct intel_engine_cs *__engine;
 	struct intel_context *context;
 	struct intel_ring *ring;
 	struct intel_timeline __rcu *timeline;
@@ -598,7 +597,7 @@ static inline struct i915_sched *
 i915_request_get_scheduler(const struct i915_request *rq)
 {
 	/* Is there an active scheduler for this request? */
-	return intel_engine_get_scheduler(rq->__engine);
+	return READ_ONCE(rq->sched.engine);
 }
 
 static inline struct intel_engine_cs *
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 62b21661732c..9a1324c480bd 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -25,22 +25,22 @@ static struct i915_global_scheduler {
 
 /*
  * Virtual engines complicate acquiring the engine timeline lock,
- * as their rq->engine pointer is not stable until under that
+ * as their rq->sched.engine pointer is not stable until under that
  * engine lock. The simple ploy we use is to take the lock then
  * check that the rq still belongs to the newly locked engine.
  */
-#define lock_engine_irqsave(rq, flags) ({ \
+#define lock_sched_irqsave(rq, flags) ({ \
 	struct i915_request * const rq__ = (rq); \
-	struct intel_engine_cs *engine__ = READ_ONCE(rq__->__engine); \
+	struct i915_sched *se__ = READ_ONCE(rq__->sched.engine); \
 \
-	spin_lock_irqsave(&engine__->sched.lock, (flags)); \
-	while (engine__ != READ_ONCE((rq__)->__engine)) { \
-		spin_unlock(&engine__->sched.lock); \
-		engine__ = READ_ONCE(rq__->__engine); \
-		spin_lock(&engine__->sched.lock); \
+	spin_lock_irqsave(&se__->lock, (flags)); \
+	while (se__ != READ_ONCE((rq__)->sched.engine)) { \
+		spin_unlock(&se__->lock); \
+		se__ = READ_ONCE(rq__->sched.engine); \
+		spin_lock(&se__->lock); \
 	} \
 \
-	engine__; \
+	se__; \
 })
 
 static struct i915_sched_node *node_get(struct i915_sched_node *node)
@@ -281,6 +281,10 @@ void i915_sched_park(struct i915_sched *se)
 
 	GEM_BUG_ON(!i915_sched_is_idle(se));
 	se->no_priolist = false;
+
+	if (se->request_pool)
+		kmem_cache_free(i915_request_slab_cache(),
+				fetch_and_zero(&se->request_pool));
 }
 
 void i915_sched_fini(struct i915_sched *se)
@@ -579,7 +583,6 @@ __i915_request_set_deadline(struct i915_sched * const se,
 			    struct i915_request *rq,
 			    u64 deadline)
 {
-	struct intel_engine_cs *engine = rq->__engine;
 	struct list_head *pos = &rq->sched.signalers_list;
 	struct list_head *plist;
 
@@ -605,7 +608,7 @@ __i915_request_set_deadline(struct i915_sched * const se,
 				if (__i915_request_is_complete(s))
 					continue;
 
-				if (s->__engine != engine) {
+				if (s->sched.engine != se) {
 					ipi_deadline(s, deadline);
 					continue;
 				}
@@ -637,14 +640,14 @@ __i915_request_set_deadline(struct i915_sched * const se,
 
 void i915_request_set_deadline(struct i915_request *rq, u64 deadline)
 {
-	struct intel_engine_cs *engine;
+	struct i915_sched *se;
 	unsigned long flags;
 
 	if (deadline >= rq_deadline(rq))
 		return;
 
-	engine = lock_engine_irqsave(rq, flags);
-	if (!i915_sched_has_deadlines(&engine->sched))
+	se = lock_sched_irqsave(rq, flags);
+	if (!i915_sched_has_deadlines(se))
 		goto unlock;
 
 	if (deadline >= rq_deadline(rq))
@@ -654,13 +657,13 @@ void i915_request_set_deadline(struct i915_request *rq, u64 deadline)
 		goto unlock;
 
 	rcu_read_lock();
-	if (__i915_request_set_deadline(&engine->sched, rq, deadline))
-		i915_sched_kick(&engine->sched);
+	if (__i915_request_set_deadline(se, rq, deadline))
+		i915_sched_kick(se);
 	rcu_read_unlock();
 	GEM_BUG_ON(rq_deadline(rq) != deadline);
 
 unlock:
-	spin_unlock_irqrestore(&engine->sched.lock, flags);
+	spin_unlock_irqrestore(&se->lock, flags);
 }
 
 static u64 prio_slice(int prio)
@@ -835,7 +838,6 @@ __i915_request_set_priority(struct i915_sched * const se,
 			    struct i915_request *rq,
 			    int prio)
 {
-	struct intel_engine_cs *engine = rq->__engine;
 	struct list_head *pos = &rq->sched.signalers_list;
 	bool kick = false;
 
@@ -863,7 +865,7 @@ __i915_request_set_priority(struct i915_sched * const se,
 	do {
 		struct i915_request *next;
 
-		if (i915_sched_has_priorities(i915_request_get_scheduler(rq))) {
+		if (i915_sched_has_priorities(se)) {
 			list_for_each_continue(pos, &rq->sched.signalers_list) {
 				struct i915_dependency *p = as_dependency(pos);
 				struct i915_request *s =
@@ -875,7 +877,7 @@ __i915_request_set_priority(struct i915_sched * const se,
 				if (__i915_request_is_complete(s))
 					continue;
 
-				if (s->__engine != engine) {
+				if (s->sched.engine != se) {
 					ipi_priority(s, prio);
 					continue;
 				}
@@ -899,7 +901,7 @@ __i915_request_set_priority(struct i915_sched * const se,
 		 * any preemption required, be dealt with upon submission.
 		 * See engine->submit_request()
 		 */
-		GEM_BUG_ON(rq->__engine != engine);
+		GEM_BUG_ON(rq->sched.engine != se);
 		if (i915_request_is_ready(rq) &&
 		    set_earliest_deadline(se, rq, rq_deadline(rq)))
 			kick = true;
@@ -915,7 +917,7 @@ __i915_request_set_priority(struct i915_sched * const se,
 
 void i915_request_set_priority(struct i915_request *rq, int prio)
 {
-	struct intel_engine_cs *engine;
+	struct i915_sched *se;
 	unsigned long flags;
 
 	if (prio <= rq_prio(rq))
@@ -953,7 +955,7 @@ void i915_request_set_priority(struct i915_request *rq, int prio)
 			return;
 	}
 
-	engine = lock_engine_irqsave(rq, flags);
+	se = lock_sched_irqsave(rq, flags);
 	if (prio <= rq_prio(rq))
 		goto unlock;
 
@@ -961,13 +963,13 @@ void i915_request_set_priority(struct i915_request *rq, int prio)
 		goto unlock;
 
 	rcu_read_lock();
-	if (__i915_request_set_priority(&engine->sched, rq, prio))
-		i915_sched_kick(&engine->sched);
+	if (__i915_request_set_priority(se, rq, prio))
+		i915_sched_kick(se);
 	rcu_read_unlock();
 	GEM_BUG_ON(rq_prio(rq) != prio);
 
 unlock:
-	spin_unlock_irqrestore(&engine->sched.lock, flags);
+	spin_unlock_irqrestore(&se->lock, flags);
 }
 
 static void __defer_request(struct i915_sched * const se,
@@ -1000,7 +1002,7 @@ static void __defer_request(struct i915_sched * const se,
 				continue;
 
 			/* Leave semaphores spinning on the other engines */
-			if (w->__engine != rq->__engine)
+			if (w->sched.engine != se)
 				continue;
 
 			/* No waiter should start before its signaler */
@@ -1049,10 +1051,8 @@ static void __defer_request(struct i915_sched * const se,
 	}
 }
 
-void __i915_sched_defer_request(struct intel_engine_cs *engine,
-				struct i915_request *rq)
+void __i915_sched_defer_request(struct i915_sched *se, struct i915_request *rq)
 {
-	struct i915_sched *se = intel_engine_get_scheduler(engine);
 	u64 deadline;
 
 	deadline = rq_deadline(rq);
@@ -1082,7 +1082,7 @@ static bool hold_request(const struct i915_request *rq)
 		const struct i915_request *s =
 			container_of(p->signaler, typeof(*s), sched);
 
-		if (s->__engine != rq->__engine)
+		if (s->sched.engine != rq->sched.engine)
 			continue;
 
 		result = i915_request_on_hold(s);
@@ -1101,21 +1101,18 @@ static bool ancestor_on_hold(const struct i915_sched *se,
 	return unlikely(!list_empty(&se->hold)) && hold_request(rq);
 }
 
-bool __i915_request_requeue(struct i915_request *rq,
-			    struct intel_engine_cs *engine)
+bool __i915_request_requeue(struct i915_request *rq, struct i915_sched *se)
 {
-	struct i915_sched *se = intel_engine_get_scheduler(engine);
-
 	RQ_TRACE(rq, "transfer from %s to %s\n",
 		 i915_request_get_scheduler(rq)->dbg.name, se->dbg.name);
 
 	lockdep_assert_held(&se->lock);
 	lockdep_assert_held(&i915_request_get_scheduler(rq)->lock);
 	GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags));
-	GEM_BUG_ON(rq->__engine == engine);
+	GEM_BUG_ON(rq->sched.engine == se);
 
 	remove_from_priolist(i915_request_get_scheduler(rq), rq, NULL, false);
-	WRITE_ONCE(rq->__engine, engine);
+	WRITE_ONCE(rq->sched.engine, se);
 
 	if (__i915_request_is_complete(rq)) {
 		clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
@@ -1174,10 +1171,8 @@ void i915_request_enqueue(struct i915_request *rq)
 		i915_sched_kick(se);
 }
 
-struct i915_request *
-__i915_sched_rewind_requests(struct intel_engine_cs *engine)
+struct i915_request *__i915_sched_rewind_requests(struct i915_sched *se)
 {
-	struct i915_sched *se = intel_engine_get_scheduler(engine);
 	struct i915_request *rq, *rn, *active = NULL;
 	u64 deadline = I915_DEADLINE_NEVER;
 	struct list_head *pl;
@@ -1226,14 +1221,13 @@ __i915_sched_rewind_requests(struct intel_engine_cs *engine)
 	return active;
 }
 
-bool __i915_sched_suspend_request(struct intel_engine_cs *engine,
+bool __i915_sched_suspend_request(struct i915_sched *se,
 				  struct i915_request *rq)
 {
-	struct i915_sched *se = intel_engine_get_scheduler(engine);
 	LIST_HEAD(list);
 
 	lockdep_assert_held(&se->lock);
-	GEM_BUG_ON(rq->__engine != engine);
+	GEM_BUG_ON(rq->sched.engine != se);
 
 	if (__i915_request_is_complete(rq)) /* too late! */
 		return false;
@@ -1268,7 +1262,7 @@ bool __i915_sched_suspend_request(struct intel_engine_cs *engine,
 				continue;
 
 			/* Leave semaphores spinning on the other engines */
-			if (w->__engine != engine)
+			if (w->sched.engine != se)
 				continue;
 
 			if (!i915_request_is_ready(w))
@@ -1291,26 +1285,22 @@ bool __i915_sched_suspend_request(struct intel_engine_cs *engine,
 	return true;
 }
 
-bool i915_sched_suspend_request(struct intel_engine_cs *engine,
-				struct i915_request *rq)
+bool i915_sched_suspend_request(struct i915_sched *se, struct i915_request *rq)
 {
-	struct i915_sched *se = intel_engine_get_scheduler(engine);
 	bool result;
 
 	if (i915_request_on_hold(rq))
 		return false;
 
 	spin_lock_irq(&se->lock);
-	result = __i915_sched_suspend_request(engine, rq);
+	result = __i915_sched_suspend_request(se, rq);
 	spin_unlock_irq(&se->lock);
 
 	return result;
 }
 
-void __i915_sched_resume_request(struct intel_engine_cs *engine,
-				 struct i915_request *rq)
+void __i915_sched_resume_request(struct i915_sched *se, struct i915_request *rq)
 {
-	struct i915_sched *se = intel_engine_get_scheduler(engine);
 	LIST_HEAD(list);
 	bool submit = false;
 
@@ -1350,7 +1340,7 @@ void __i915_sched_resume_request(struct intel_engine_cs *engine,
 			if (rq->fence.error)
 				i915_request_set_error_once(w, rq->fence.error);
 
-			if (w->__engine != engine)
+			if (w->sched.engine != se)
 				continue;
 
 			/* We also treat the on-hold status as a visited bit */
@@ -1386,13 +1376,11 @@ void i915_request_update_deadline(struct i915_request *rq)
 	i915_request_set_deadline(rq, update_deadline(rq));
 }
 
-void i915_sched_resume_request(struct intel_engine_cs *engine,
+void i915_sched_resume_request(struct i915_sched *se,
 			       struct i915_request *rq)
 {
-	struct i915_sched *se = intel_engine_get_scheduler(engine);
-
 	spin_lock_irq(&se->lock);
-	__i915_sched_resume_request(engine, rq);
+	__i915_sched_resume_request(se, rq);
 	spin_unlock_irq(&se->lock);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 522dfc5eb249..046d22e92f70 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -15,7 +15,6 @@
 #include "i915_request.h"
 
 struct drm_printer;
-struct intel_engine_cs;
 
 #define SCHED_TRACE(se, fmt, ...) do {					\
 	const struct i915_sched *se__ __maybe_unused = (se);		\
@@ -56,21 +55,21 @@ void i915_request_update_deadline(struct i915_request *request);
 
 void i915_request_enqueue(struct i915_request *request);
 bool __i915_request_requeue(struct i915_request *rq,
-			    struct intel_engine_cs *engine);
+			    struct i915_sched *se);
 
 struct i915_request *
-__i915_sched_rewind_requests(struct intel_engine_cs *engine);
-void __i915_sched_defer_request(struct intel_engine_cs *engine,
+__i915_sched_rewind_requests(struct i915_sched *engine);
+void __i915_sched_defer_request(struct i915_sched *engine,
 				struct i915_request *request);
 
-bool __i915_sched_suspend_request(struct intel_engine_cs *engine,
+bool __i915_sched_suspend_request(struct i915_sched *engine,
 				  struct i915_request *rq);
-void __i915_sched_resume_request(struct intel_engine_cs *engine,
+void __i915_sched_resume_request(struct i915_sched *engine,
 				 struct i915_request *request);
 
-bool i915_sched_suspend_request(struct intel_engine_cs *engine,
+bool i915_sched_suspend_request(struct i915_sched *engine,
 				struct i915_request *request);
-void i915_sched_resume_request(struct intel_engine_cs *engine,
+void i915_sched_resume_request(struct i915_sched *engine,
 			       struct i915_request *rq);
 
 void __i915_sched_cancel_queue(struct i915_sched *se);
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index 89cccda35ecd..fd2e06ab6c66 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -143,6 +143,9 @@ struct i915_sched {
 	 */
 	bool no_priolist;
 
+	/* keep a request in reserve for a [pm] barrier under oom */
+	struct i915_request *request_pool;
+
 	/* Pretty device names for debug messages */
 	struct {
 		struct device *dev;
@@ -194,6 +197,8 @@ struct i915_sched_attr {
  * others.
  */
 struct i915_sched_node {
+	struct i915_sched *engine;
+
 	spinlock_t lock; /* protect the lists */
 
 	struct list_head signalers_list; /* those before us, we depend upon */
diff --git a/drivers/gpu/drm/i915/selftests/i915_scheduler.c b/drivers/gpu/drm/i915/selftests/i915_scheduler.c
index fea3f4b46e59..2323da819785 100644
--- a/drivers/gpu/drm/i915/selftests/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/selftests/i915_scheduler.c
@@ -606,14 +606,13 @@ static int igt_deadline_chains(void *arg)
 
 static bool igt_defer(struct i915_request *rq, unsigned long v, unsigned long e)
 {
-	struct intel_engine_cs *engine = rq->__engine;
-	struct i915_sched *se = intel_engine_get_scheduler(engine);
+	struct i915_sched *se = rq->sched.engine;
 
 	/* XXX No generic means to unwind incomplete requests yet */
 	if (!i915_request_in_priority_queue(rq))
 		return false;
 
-	if (!intel_engine_has_preemption(engine))
+	if (!intel_engine_has_preemption(rq->context->engine))
 		return false;
 
 	spin_lock_irq(&se->lock);
@@ -624,10 +623,10 @@ static bool igt_defer(struct i915_request *rq, unsigned long v, unsigned long e)
 
 	/* Then the very first request must be the one everyone depends on */
 	rq = list_first_entry(lookup_priolist(se, 0), typeof(*rq), sched.link);
-	GEM_BUG_ON(rq->__engine != engine);
+	GEM_BUG_ON(rq->sched.engine != se);
 
 	/* Deferring the first request will then have to defer all requests */
-	__i915_sched_defer_request(engine, rq);
+	__i915_sched_defer_request(se, rq);
 
 	spin_unlock_irq(&se->lock);
 	return true;
-- 
2.20.1


