[PATCH 73/75] drm/i915: Move execution_mask into the scheduler node
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Tue, 2 Feb 2021 00:36:39 +0000 (UTC)
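Move the per-request engine-selection mask from struct i915_request
(rq->execution_mask) into struct i915_sched_node (rq->sched.execution), so
the mask lives with the rest of the scheduling state and every call site
reaches it through the request's sched node.

A minimal sketch of the relocation follows. It is an illustrative userspace
mock, not kernel code: the struct layouts are abbreviated to the members
visible in this diff, and the example mask value and main()/printf wrapper
are invented for the illustration.

	/*
	 * Illustrative sketch only: struct layouts abbreviated to the
	 * members touched by this patch.
	 */
	#include <stdio.h>

	typedef unsigned int intel_engine_mask_t;	/* engine bitmask type */

	struct i915_sched_node {
		unsigned long flags;
		unsigned long semaphores;
		unsigned long execution;	/* new home of the engine mask */
	};

	struct i915_request {
		struct i915_sched_node sched;
		/* intel_engine_mask_t execution_mask; -- removed by this patch */
	};

	int main(void)
	{
		/* two allowed engines, e.g. bits 0 and 1 (hypothetical value) */
		struct i915_request rq = { .sched.execution = 0x3 };

		/* Readers of rq->execution_mask now read rq->sched.execution. */
		printf("allowed engines: %#lx\n", rq.sched.execution);
		return 0;
	}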
---
drivers/gpu/drm/i915/gt/gen8_engine_cs.c | 2 +-
.../drm/i915/gt/intel_execlists_submission.c | 22 +++++++++----------
drivers/gpu/drm/i915/gt/selftest_execlists.c | 4 ++--
drivers/gpu/drm/i915/i915_request.c | 4 ++--
drivers/gpu/drm/i915/i915_request.h | 1 -
drivers/gpu/drm/i915/i915_scheduler.c | 2 +-
drivers/gpu/drm/i915/i915_scheduler_types.h | 1 +
7 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
index 4e8dddc817a5..bdc6e5e40b14 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
@@ -287,7 +287,7 @@ int gen12_emit_flush_xcs(const struct intel_engine_cs *engine,
if (mode & EMIT_INVALIDATE)
cmd += 2;
if (mode & EMIT_INVALIDATE)
- aux_inv = rq->execution_mask & ~BIT(BCS0);
+ aux_inv = rq->sched.execution & ~BIT(BCS0);
if (aux_inv)
cmd += 2 * hweight8(aux_inv) + 2;
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 84c3c158a584..9ef4e820ba34 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -566,7 +566,7 @@ static void kick_siblings(struct intel_execlists *el,
* same as other native request.
*/
if (i915_request_in_priority_queue(rq) &&
- rq->execution_mask != BIT(el->id))
+ rq->sched.execution != BIT(el->id))
resubmit_virtual_request(el, rq, ve);
if (!i915_sched_is_idle(ve->base.sched))
@@ -818,7 +818,7 @@ assert_pending_valid(const struct intel_execlists *el,
* that they are never stuck behind a hog and can be immediately
* transferred onto the next idle engine.
*/
- if (rq->execution_mask != BIT(el->id) && port != el->pending) {
+ if (rq->sched.execution != BIT(el->id) && port != el->pending) {
GEM_TRACE_ERR("%s: virtual engine:%llx not in prime position[%zd]\n",
el->sched.dbg.name,
ce->timeline->fence_context,
@@ -971,7 +971,7 @@ static bool virtual_matches(const struct virtual_engine *ve,
if (!rq)
return false;
- if (!(rq->execution_mask & el->sched.mask)) /* We peeked too soon! */
+ if (!(rq->sched.execution & el->sched.mask)) /* We peeked too soon! */
return false;
/*
@@ -1233,7 +1233,7 @@ static void virtual_requeue(struct intel_execlists *el,
rq_deadline(rq),
yesno(el->sched.priv != ve->siblings[0]));
- GEM_BUG_ON(!(rq->execution_mask & BIT(el->id)));
+ GEM_BUG_ON(!(rq->sched.execution & BIT(el->id)));
if (__i915_request_requeue(rq, &el->sched)) {
/*
* Only after we confirm that we will submit
@@ -1464,7 +1464,7 @@ static void execlists_dequeue(struct intel_execlists *el)
* the request immediately to another engine
* rather than wait for the primary request.
*/
- if (rq->execution_mask != BIT(el->id))
+ if (rq->sched.execution != BIT(el->id))
goto done;
if (unlikely(dl_before(first_virtual(el), rq)))
@@ -3311,17 +3311,17 @@ virtual_submission_mask(struct virtual_engine *ve, u64 *deadline)
return NULL;
/* The rq is ready for submission; rq->execution_mask is now stable. */
- if (unlikely(!rq->execution_mask)) {
+ if (unlikely(!rq->sched.execution)) {
/* Invalid selection, submit to a random engine in error */
i915_request_set_error_once(rq, -ENODEV);
- WRITE_ONCE(rq->execution_mask, ALL_ENGINES);
+ WRITE_ONCE(rq->sched.execution, ALL_ENGINES);
}
*deadline = rq_deadline(rq);
ENGINE_TRACE(&ve->base, "rq=%llx:%llu, mask=%x, dl=%llu\n",
rq->fence.context, rq->fence.seqno,
- rq->execution_mask, *deadline);
+ rq->sched.execution, *deadline);
return rq;
}
@@ -3435,12 +3435,12 @@ virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
allowed &= bond->sibling_mask;
/* Restrict the bonded request to run on only the available engines */
- exec = READ_ONCE(rq->execution_mask);
- while (!try_cmpxchg(&rq->execution_mask, &exec, exec & allowed))
+ exec = READ_ONCE(rq->sched.execution);
+ while (!try_cmpxchg(&rq->sched.execution, &exec, exec & allowed))
;
/* Prevent the master from being re-run on the bonded engines */
- to_request(signal)->execution_mask &= ~allowed;
+ to_request(signal)->sched.execution &= ~allowed;
}
struct intel_context *
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index 25619ab01b1a..4ac4136be8a0 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -3943,7 +3943,7 @@ static int mask_virtual_engine(struct intel_gt *gt,
}
/* Reverse order as it's more likely to be unnatural */
- request[n]->execution_mask = siblings[nsibling - n - 1]->mask;
+ request[n]->sched.execution = siblings[nsibling - n - 1]->mask;
i915_request_get(request[n]);
i915_request_add(request[n]);
@@ -4251,7 +4251,7 @@ static int preserved_virtual_engine(struct intel_gt *gt,
intel_ring_advance(rq, cs);
/* Restrict this request to run on a particular engine */
- rq->execution_mask = engine->mask;
+ rq->sched.execution = engine->mask;
i915_request_add(rq);
}
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 9ce1cdccde65..21ca09884447 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -143,7 +143,7 @@ static void i915_fence_release(struct dma_fence *fence)
* know that if the rq->execution_mask is a single bit, rq->engine
* can be a physical engine with the exact corresponding mask.
*/
- if (is_power_of_2(rq->execution_mask) &&
+ if (is_power_of_2(rq->sched.execution) &&
!cmpxchg(&rq->sched.engine->request_pool, NULL, rq))
return;
@@ -1238,7 +1238,7 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
return ret;
}
- if (is_power_of_2(to->execution_mask | READ_ONCE(from->execution_mask)))
+ if (is_power_of_2(to->sched.execution | READ_ONCE(from->sched.execution)))
ret = await_request_submit(to, from);
else
ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 240bbccc0557..dfd74b32f10d 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -228,7 +228,6 @@ struct i915_request {
*/
struct i915_sched_node sched;
struct i915_dependency dep;
- intel_engine_mask_t execution_mask;
/*
* A convenience pointer to the current breadcrumb value stored in
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 35f9839c6810..c0b4fa561662 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -1528,7 +1528,7 @@ void i915_sched_prepare_request(struct i915_sched *se, struct i915_request *rq)
i915_sw_fence_reinit(&i915_request_get(rq)->sched.semaphore);
rq->sched.engine = se;
- rq->execution_mask = se->mask;
+ rq->sched.execution = se->mask;
}
static struct i915_dependency *
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index 977b0c035b59..53bce8a0785b 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -180,6 +180,7 @@ struct i915_sched_node {
unsigned long flags;
#define I915_SCHED_HAS_EXTERNAL_CHAIN BIT(0)
unsigned long semaphores;
+ unsigned long execution;
/**
* @deadline: [virtual] deadline
--
2.20.1