[PATCH 46/47] drm/i915: Move i915_request.execution_mask to i915_request.sched

Chris Wilson chris at chris-wilson.co.uk
Tue Feb 9 21:27:07 UTC 2021


As was apparent in the last patch, the i915_request.execution_mask is
driven by the scheduler.

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/gen8_engine_cs.c      |  2 +-
 .../drm/i915/gt/intel_execlists_submission.c  | 28 +++++++++----------
 drivers/gpu/drm/i915/gt/selftest_execlists.c  |  4 +--
 drivers/gpu/drm/i915/i915_request.c           |  4 +--
 drivers/gpu/drm/i915/i915_request.h           |  1 -
 drivers/gpu/drm/i915/i915_scheduler.c         |  2 +-
 drivers/gpu/drm/i915/i915_scheduler_types.h   |  1 +
 7 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
index 4e8dddc817a5..bdc6e5e40b14 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
@@ -287,7 +287,7 @@ int gen12_emit_flush_xcs(const struct intel_engine_cs *engine,
 	if (mode & EMIT_INVALIDATE)
 		cmd += 2;
 	if (mode & EMIT_INVALIDATE)
-		aux_inv = rq->execution_mask & ~BIT(BCS0);
+		aux_inv = rq->sched.execution & ~BIT(BCS0);
 	if (aux_inv)
 		cmd += 2 * hweight8(aux_inv) + 2;
 
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index f9d1366ef6f0..2a3b1ed64663 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -283,7 +283,7 @@ struct virtual_engine {
 	 */
 	struct ve_bond {
 		const struct i915_sched *master;
-		intel_engine_mask_t sibling_mask;
+		unsigned long sibling_mask;
 	} *bonds;
 	unsigned int num_bonds;
 
@@ -671,7 +671,7 @@ static void kick_siblings(struct intel_execlists *el,
 	 * same as other native request.
 	 */
 	if (i915_request_in_priority_queue(rq) &&
-	    rq->execution_mask != el->sched.mask)
+	    rq->sched.execution != el->sched.mask)
 		resubmit_virtual_request(el, rq, ve);
 
 	if (!i915_sched_is_idle(ve->base.sched))
@@ -927,7 +927,7 @@ assert_pending_valid(const struct intel_execlists *el,
 		 * that they are never stuck behind a hog and can be immediately
 		 * transferred onto the next idle engine.
 		 */
-		if (rq->execution_mask != el->sched.mask &&
+		if (rq->sched.execution != el->sched.mask &&
 		    port != el->pending) {
 			GEM_TRACE_ERR("%s: virtual engine:%llx not in prime position[%zd]\n",
 				      el->sched.dbg.name,
@@ -1081,7 +1081,7 @@ static bool virtual_matches(const struct virtual_engine *ve,
 	if (!rq)
 		return false;
 
-	if (!(rq->execution_mask & el->sched.mask)) /* We peeked too soon! */
+	if (!(rq->sched.execution & el->sched.mask)) /* We peeked too soon! */
 		return false;
 
 	/*
@@ -1346,7 +1346,7 @@ static void virtual_requeue(struct intel_execlists *el,
 			     rq_deadline(rq),
 			     yesno(el->sched.priv != ve->siblings[0]));
 
-		GEM_BUG_ON(!(rq->execution_mask & el->sched.mask));
+		GEM_BUG_ON(!(rq->sched.execution & el->sched.mask));
 		if (__i915_request_requeue(rq, &el->sched)) {
 			/*
 			 * Only after we confirm that we will submit
@@ -1575,7 +1575,7 @@ static void execlists_dequeue(struct intel_execlists *el)
 			 * the request immediately to another engine
 			 * rather than wait for the primary request.
 			 */
-			if (rq->execution_mask != el->sched.mask)
+			if (rq->sched.execution != el->sched.mask)
 				goto done;
 
 			if (unlikely(dl_before(first_virtual(el), rq)))
@@ -3358,17 +3358,17 @@ virtual_submission_mask(struct virtual_engine *ve, u64 *deadline)
 		return NULL;
 
-	/* The rq is ready for submission; rq->execution_mask is now stable. */
+	/* The rq is ready for submission; rq->sched.execution is now stable. */
-	if (unlikely(!rq->execution_mask)) {
+	if (unlikely(!rq->sched.execution)) {
 		/* Invalid selection, submit to a random engine in error */
 		i915_request_set_error_once(rq, -ENODEV);
-		WRITE_ONCE(rq->execution_mask, ALL_ENGINES);
+		WRITE_ONCE(rq->sched.execution, ALL_ENGINES);
 	}
 
 	*deadline = rq_deadline(rq);
 
-	ENGINE_TRACE(&ve->base, "rq=%llx:%llu, mask=%x, dl=%llu\n",
+	ENGINE_TRACE(&ve->base, "rq=%llx:%llu, mask=%lx, dl=%llu\n",
 		     rq->fence.context, rq->fence.seqno,
-		     rq->execution_mask, *deadline);
+		     rq->sched.execution, *deadline);
 
 	return rq;
 }
@@ -3472,7 +3472,7 @@ static void
 virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
 {
 	struct virtual_engine *ve = to_virtual_context(rq->context);
-	intel_engine_mask_t allowed, exec;
+	unsigned long allowed, exec;
 	struct ve_bond *bond;
 
 	allowed = ~to_request(signal)->sched.engine->mask;
@@ -3482,12 +3482,12 @@ virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
 		allowed &= bond->sibling_mask;
 
 	/* Restrict the bonded request to run on only the available engines */
-	exec = READ_ONCE(rq->execution_mask);
-	while (!try_cmpxchg(&rq->execution_mask, &exec, exec & allowed))
+	exec = READ_ONCE(rq->sched.execution);
+	while (!try_cmpxchg(&rq->sched.execution, &exec, exec & allowed))
 		;
 
 	/* Prevent the master from being re-run on the bonded engines */
-	to_request(signal)->execution_mask &= ~allowed;
+	to_request(signal)->sched.execution &= ~allowed;
 }
 
 struct intel_context *
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index 6f3c20934007..668d4faf7ed5 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -3927,7 +3927,7 @@ static int mask_virtual_engine(struct intel_gt *gt,
 		}
 
 		/* Reverse order as it's more likely to be unnatural */
-		request[n]->execution_mask = siblings[nsibling - n - 1]->mask;
+		request[n]->sched.execution = siblings[nsibling - n - 1]->mask;
 
 		i915_request_get(request[n]);
 		i915_request_add(request[n]);
@@ -4236,7 +4236,7 @@ static int preserved_virtual_engine(struct intel_gt *gt,
 		intel_ring_advance(rq, cs);
 
 		/* Restrict this request to run on a particular engine */
-		rq->execution_mask = engine->mask;
+		rq->sched.execution = engine->mask;
 		i915_request_add(rq);
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 207f0a1fd886..19171f77cb0c 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -145,7 +145,7 @@ static void i915_fence_release(struct dma_fence *fence)
-	 * know that if the rq->execution_mask is a single bit, rq->engine
+	 * know that if the rq->sched.execution is a single bit, rq->sched.engine
 	 * can be a physical engine with the exact corresponding mask.
 	 */
-	if (is_power_of_2(rq->execution_mask) &&
+	if (is_power_of_2(rq->sched.execution) &&
 	    !cmpxchg(&rq->sched.engine->request_pool, NULL, rq))
 		return;
 
@@ -1236,7 +1236,7 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
 			return ret;
 	}
 
-	if (is_power_of_2(to->execution_mask | READ_ONCE(from->execution_mask)))
+	if (is_power_of_2(to->sched.execution | READ_ONCE(from->sched.execution)))
 		ret = await_request_submit(to, from);
 	else
 		ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 885f83a6ce81..9b45d821f630 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -228,7 +228,6 @@ struct i915_request {
 	 */
 	struct i915_sched_node sched;
 	struct i915_dependency dep;
-	intel_engine_mask_t execution_mask;
 
 	/*
 	 * A convenience pointer to the current breadcrumb value stored in
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 2cfaaa12a357..1734dfe4e6eb 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -1545,7 +1545,7 @@ void i915_sched_prepare_request(struct i915_sched *se, struct i915_request *rq)
 	i915_sw_fence_reinit(&rq->sched.semaphore);
 
 	rq->sched.engine = se;
-	rq->execution_mask = se->mask;
+	rq->sched.execution = se->mask;
 }
 
 static struct i915_dependency *
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index cc0953cefa72..5552b137fff5 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -218,6 +218,7 @@ struct i915_sched_node {
 	unsigned long flags;
 #define I915_SCHED_HAS_EXTERNAL_CHAIN	BIT(0)
 	unsigned long semaphores;
+	unsigned long execution;
 
 	/**
 	 * @deadline: [virtual] deadline
-- 
2.20.1



More information about the Intel-gfx-trybot mailing list