[PATCH 55/57] drm/i915: Move i915_request.execution_mask to i915_request.sched

Chris Wilson <chris@chris-wilson.co.uk>
Wed Feb 3 23:49:06 UTC 2021


As was apparent in the previous patch, i915_request.execution_mask is
owned and driven by the scheduler, so move it into the request's
embedded scheduling node, i915_request.sched, alongside the rest of the
scheduler-managed state.
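
Concretely, the field moves from struct i915_request into its embedded
struct i915_sched_node, so every access changes from rq->execution_mask
to rq->sched.execution. As a standalone sketch of the relocation (the
structs are stripped down to the members touched here, and can_run_on()
is purely illustrative, not a kernel helper):

	#include <stdbool.h>

	typedef unsigned int intel_engine_mask_t; /* stand-in typedef */

	struct i915_sched_node {
		/* after: the scheduler owns the engine mask */
		unsigned long execution;
	};

	struct i915_request {
		struct i915_sched_node sched;
		/* before: intel_engine_mask_t execution_mask; */
	};

	static inline bool can_run_on(const struct i915_request *rq,
				      unsigned long engine_mask)
	{
		/* was: rq->execution_mask & engine_mask */
		return rq->sched.execution & engine_mask;
	}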

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/gen8_engine_cs.c      |  2 +-
 .../drm/i915/gt/intel_execlists_submission.c  | 26 +++++++++----------
 drivers/gpu/drm/i915/gt/selftest_execlists.c  |  4 ++--
 drivers/gpu/drm/i915/i915_request.c           |  6 +++---
 drivers/gpu/drm/i915/i915_request.h           |  1 -
 drivers/gpu/drm/i915/i915_scheduler.c         |  2 +-
 drivers/gpu/drm/i915/i915_scheduler_types.h   |  1 +
 7 files changed, 21 insertions(+), 21 deletions(-)
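
Note the one non-trivial access pattern: virtual_bond_execute() narrows
the mask with a try_cmpxchg() loop so that a concurrent update of the
mask is never lost; that loop is unchanged apart from the rename. A
standalone sketch of the same idea using C11 atomics (narrow_execution()
is a made-up name, not the kernel's try_cmpxchg() API):

	#include <stdatomic.h>

	static void narrow_execution(_Atomic unsigned long *execution,
				     unsigned long allowed)
	{
		unsigned long exec = atomic_load(execution);

		/*
		 * On failure, the CAS reloads the latest value into exec,
		 * so the AND is always applied on top of fresh state.
		 */
		while (!atomic_compare_exchange_weak(execution, &exec,
						     exec & allowed))
			;
	}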

diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
index 4e8dddc817a5..bdc6e5e40b14 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
@@ -287,7 +287,7 @@ int gen12_emit_flush_xcs(const struct intel_engine_cs *engine,
 	if (mode & EMIT_INVALIDATE)
 		cmd += 2;
 	if (mode & EMIT_INVALIDATE)
-		aux_inv = rq->execution_mask & ~BIT(BCS0);
+		aux_inv = rq->sched.execution & ~BIT(BCS0);
 	if (aux_inv)
 		cmd += 2 * hweight8(aux_inv) + 2;
 
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 528b4c7e01ed..39e255271f59 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -706,7 +706,7 @@ static void kick_siblings(struct intel_execlists *el,
 	 * same as other native request.
 	 */
 	if (i915_request_in_priority_queue(rq) &&
-	    rq->execution_mask != BIT(el->id))
+	    rq->sched.execution != BIT(el->id))
 		resubmit_virtual_request(el, rq, ve);
 
 	if (!i915_sched_is_idle(ve->base.sched))
@@ -962,7 +962,7 @@ assert_pending_valid(const struct intel_execlists *el,
 		 * that they are never stuck behind a hog and can be immediately
 		 * transferred onto the next idle engine.
 		 */
-		if (rq->execution_mask != BIT(el->id) && port != el->pending) {
+		if (rq->sched.execution != BIT(el->id) && port != el->pending) {
 			GEM_TRACE_ERR("%s: virtual engine:%llx not in prime position[%zd]\n",
 				      el->sched.dbg.name,
 				      ce->timeline->fence_context,
@@ -1115,7 +1115,7 @@ static bool virtual_matches(const struct virtual_engine *ve,
 	if (!rq)
 		return false;
 
-	if (!(rq->execution_mask & el->sched.mask)) /* We peeked too soon! */
+	if (!(rq->sched.execution & el->sched.mask)) /* We peeked too soon! */
 		return false;
 
 	/*
@@ -1377,7 +1377,7 @@ static void virtual_requeue(struct intel_execlists *el,
 			     rq_deadline(rq),
 			     yesno(el->sched.priv != ve->siblings[0]));
 
-		GEM_BUG_ON(!(rq->execution_mask & BIT(el->id)));
+		GEM_BUG_ON(!(rq->sched.execution & BIT(el->id)));
 		if (__i915_request_requeue(rq, &el->sched)) {
 			/*
 			 * Only after we confirm that we will submit
@@ -1608,7 +1608,7 @@ static void execlists_dequeue(struct intel_execlists *el)
 				 * the request immediately to another engine
 				 * rather than wait for the primary request.
 				 */
-				if (rq->execution_mask != BIT(el->id))
+				if (rq->sched.execution != BIT(el->id))
 					goto done;
 
 				if (unlikely(dl_before(first_virtual(el), rq)))
@@ -3476,17 +3476,17 @@ virtual_submission_mask(struct virtual_engine *ve, u64 *deadline)
 		return NULL;
 
-	/* The rq is ready for submission; rq->execution_mask is now stable. */
-	if (unlikely(!rq->execution_mask)) {
+	/* The rq is ready for submission; rq->sched.execution is now stable. */
+	if (unlikely(!rq->sched.execution)) {
 		/* Invalid selection, submit to a random engine in error */
 		i915_request_set_error_once(rq, -ENODEV);
-		WRITE_ONCE(rq->execution_mask, ALL_ENGINES);
+		WRITE_ONCE(rq->sched.execution, ALL_ENGINES);
 	}
 
 	*deadline = rq_deadline(rq);
 
-	ENGINE_TRACE(&ve->base, "rq=%llx:%llu, mask=%x, dl=%llu\n",
+	ENGINE_TRACE(&ve->base, "rq=%llx:%llu, mask=%lx, dl=%llu\n",
 		     rq->fence.context, rq->fence.seqno,
-		     rq->execution_mask, *deadline);
+		     rq->sched.execution, *deadline);
 
 	return rq;
 }
@@ -3600,12 +3600,12 @@ virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
 		allowed &= bond->sibling_mask;
 
 	/* Restrict the bonded request to run on only the available engines */
-	exec = READ_ONCE(rq->execution_mask);
-	while (!try_cmpxchg(&rq->execution_mask, &exec, exec & allowed))
+	exec = READ_ONCE(rq->sched.execution);
+	while (!try_cmpxchg(&rq->sched.execution, &exec, exec & allowed))
 		;
 
 	/* Prevent the master from being re-run on the bonded engines */
-	to_request(signal)->execution_mask &= ~allowed;
+	to_request(signal)->sched.execution &= ~allowed;
 }
 
 struct intel_context *
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index 6bd2a55244b9..93a3c73bd664 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -3924,7 +3924,7 @@ static int mask_virtual_engine(struct intel_gt *gt,
 		}
 
 		/* Reverse order as it's more likely to be unnatural */
-		request[n]->execution_mask = siblings[nsibling - n - 1]->mask;
+		request[n]->sched.execution = siblings[nsibling - n - 1]->mask;
 
 		i915_request_get(request[n]);
 		i915_request_add(request[n]);
@@ -4232,7 +4232,7 @@ static int preserved_virtual_engine(struct intel_gt *gt,
 		intel_ring_advance(rq, cs);
 
 		/* Restrict this request to run on a particular engine */
-		rq->execution_mask = engine->mask;
+		rq->sched.execution = engine->mask;
 		i915_request_add(rq);
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 7120cf392a88..e478bd68c073 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -145,7 +145,7 @@ static void i915_fence_release(struct dma_fence *fence)
-	 * know that if the rq->execution_mask is a single bit, rq->engine
+	 * know that if the rq->sched.execution is a single bit, rq->engine
 	 * can be a physical engine with the exact corresponding mask.
 	 */
-	if (is_power_of_2(rq->execution_mask) &&
+	if (is_power_of_2(rq->sched.execution) &&
 	    !cmpxchg(&rq->sched.engine->request_pool, NULL, rq))
 		return;
 
@@ -1240,7 +1240,7 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
 			return ret;
 	}
 
-	if (is_power_of_2(to->execution_mask | READ_ONCE(from->execution_mask)))
+	if (is_power_of_2(to->sched.execution | READ_ONCE(from->sched.execution)))
 		ret = await_request_submit(to, from);
 	else
 		ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 240bbccc0557..dfd74b32f10d 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -228,7 +228,6 @@ struct i915_request {
 	 */
 	struct i915_sched_node sched;
 	struct i915_dependency dep;
-	intel_engine_mask_t execution_mask;
 
 	/*
 	 * A convenience pointer to the current breadcrumb value stored in
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 0f5af5557d55..1f1b318acc26 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -1560,7 +1560,7 @@ void i915_sched_prepare_request(struct i915_sched *se, struct i915_request *rq)
 	i915_sw_fence_reinit(&rq->sched.semaphore);
 
 	rq->sched.engine = se;
-	rq->execution_mask = se->mask;
+	rq->sched.execution = se->mask;
 }
 
 static struct i915_dependency *
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index 1e029765ba21..dcb3e8e76f80 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -180,6 +180,7 @@ struct i915_sched_node {
 	unsigned long flags;
 #define I915_SCHED_HAS_EXTERNAL_CHAIN	BIT(0)
 	unsigned long semaphores;
+	unsigned long execution; /* mask of engines this node may run on */
 
 	/**
 	 * @deadline: [virtual] deadline
-- 
2.20.1


