[PATCH 23/51] drm/i915: Wrap access to engine->active
Chris Wilson
chris at chris-wilson.co.uk
Sun Jan 31 02:09:03 UTC 2021
Now that we have separated out the scheduling lists from the engine
type, provide a helper for returning the scheduler interface from the
engine. We will want to make further changes in the near future and so
wish to minimise the noise.
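For reference, intel_engine_get_scheduler() is introduced earlier in the
series and is not part of this diff. Judging from the conversions below
(&ve->base.sched.lock becoming &se->lock), it is presumably just a trivial
inline accessor over the embedded struct; a minimal sketch, assuming that
shape:

static inline struct i915_sched_engine *
intel_engine_get_scheduler(struct intel_engine_cs *engine)
{
	/* Assumed form: return the scheduler embedded in the engine. */
	return &engine->sched;
}

Funnelling every lookup through the one accessor means that if the backing
storage changes later (say, engine->sched becomes a pointer), only the
helper needs updating rather than every call site.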
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
drivers/gpu/drm/i915/gt/intel_engine_pm.c | 2 +-
.../drm/i915/gt/intel_execlists_submission.c | 21 +++++++++++--------
drivers/gpu/drm/i915/gt/selftest_execlists.c | 13 ++++++------
.../gpu/drm/i915/gt/uc/intel_guc_submission.c | 3 ++-
drivers/gpu/drm/i915/i915_request.c | 7 -------
drivers/gpu/drm/i915/selftests/igt_spinner.c | 2 +-
6 files changed, 23 insertions(+), 25 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 70a164bfdaa8..0854159eeb5a 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -277,7 +277,7 @@ static int __engine_park(struct intel_wakeref *wf)
if (engine->park)
engine->park(engine);
- i915_sched_park_engine(&engine->sched);
+ i915_sched_park_engine(intel_engine_get_scheduler(engine));
/* While gt calls i915_vma_parked(), we have to break the lock cycle */
intel_gt_pm_put_async(engine->gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 566c935419fe..7160d92b4f8d 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -1346,7 +1346,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
break;
}
- while ((rb = rb_first_cached(&engine->sched.queue))) {
+ while ((rb = rb_first_cached(&se->queue))) {
struct i915_priolist *p = to_priolist(rb);
struct i915_request *rq, *rn;
@@ -1425,7 +1425,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
}
}
- rb_erase_cached(&p->node, &engine->sched.queue);
+ rb_erase_cached(&p->node, &se->queue);
i915_priolist_free(p);
}
done:
@@ -2951,6 +2951,7 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
{
struct virtual_engine *ve =
container_of(wrk, typeof(*ve), rcu.work);
+ struct i915_sched_engine *se = intel_engine_get_scheduler(&ve->base);
unsigned int n;
GEM_BUG_ON(ve->context.inflight);
@@ -2959,7 +2960,7 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
if (unlikely(ve->request)) {
struct i915_request *old;
- spin_lock_irq(&ve->base.sched.lock);
+ spin_lock_irq(&se->lock);
old = fetch_and_zero(&ve->request);
if (old) {
@@ -2968,7 +2969,7 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
i915_request_put(old);
}
- spin_unlock_irq(&ve->base.sched.lock);
+ spin_unlock_irq(&se->lock);
}
/*
@@ -2978,7 +2979,7 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
* rbtrees as in the case it is running in parallel, it may reinsert
* the rb_node into a sibling.
*/
- i915_sched_fini_engine(&ve->base.sched);
+ i915_sched_fini_engine(se);
/* Decouple ourselves from the siblings, no more access allowed. */
for (n = 0; n < ve->num_siblings; n++) {
@@ -2996,7 +2997,7 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
spin_unlock_irq(&sibling->sched.lock);
}
- GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.sched.tasklet));
+ GEM_BUG_ON(__tasklet_is_scheduled(&se->tasklet));
GEM_BUG_ON(!list_empty(virtual_queue(ve)));
lrc_fini(&ve->context);
@@ -3154,6 +3155,8 @@ static void virtual_submission_tasklet(struct tasklet_struct *t)
for (n = 0; n < ve->num_siblings; n++) {
struct intel_engine_cs *sibling = READ_ONCE(ve->siblings[n]);
+ struct i915_sched_engine *se =
+ intel_engine_get_scheduler(sibling);
struct ve_node * const node = &ve->nodes[sibling->id];
struct rb_node **parent, *rb;
bool first;
@@ -3161,7 +3164,7 @@ static void virtual_submission_tasklet(struct tasklet_struct *t)
if (!READ_ONCE(ve->request))
break; /* already handled by a sibling's tasklet */
- spin_lock_irq(&sibling->sched.lock);
+ spin_lock_irq(&se->lock);
if (unlikely(!(mask & sibling->mask))) {
if (!RB_EMPTY_NODE(&node->rb)) {
@@ -3211,10 +3214,10 @@ static void virtual_submission_tasklet(struct tasklet_struct *t)
GEM_BUG_ON(RB_EMPTY_NODE(&node->rb));
node->prio = prio;
if (first && prio > sibling->execlists.queue_priority_hint)
- intel_engine_kick_scheduler(sibling);
+ i915_sched_kick(se);
unlock_engine:
- spin_unlock_irq(&sibling->sched.lock);
+ spin_unlock_irq(&se->lock);
if (intel_context_inflight(&ve->context))
break;
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index 311b3b3a2bf7..68a33efb097c 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -568,6 +568,7 @@ static int live_hold_reset(void *arg)
return -ENOMEM;
for_each_engine(engine, gt, id) {
+ struct i915_sched_engine *se = intel_engine_get_scheduler(engine);
struct intel_context *ce;
struct i915_request *rq;
@@ -602,9 +603,9 @@ static int live_hold_reset(void *arg)
err = -EBUSY;
goto out;
}
- tasklet_disable(&engine->sched.tasklet);
+ tasklet_disable(&se->tasklet);
- engine->sched.tasklet.callback(&engine->sched.tasklet);
+ se->tasklet.callback(&se->tasklet);
GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
i915_request_get(rq);
@@ -614,7 +615,7 @@ static int live_hold_reset(void *arg)
__intel_engine_reset_bh(engine, NULL);
GEM_BUG_ON(rq->fence.error != -EIO);
- tasklet_enable(&engine->sched.tasklet);
+ tasklet_enable(&se->tasklet);
clear_and_wake_up_bit(I915_RESET_ENGINE + id,
&gt->reset.flags);
local_bh_enable();
@@ -4577,9 +4578,9 @@ static int reset_virtual_engine(struct intel_gt *gt,
err = -EBUSY;
goto out_heartbeat;
}
- tasklet_disable(&engine->sched.tasklet);
+ tasklet_disable(&se->tasklet);
- engine->sched.tasklet.callback(&engine->sched.tasklet);
+ se->tasklet.callback(&se->tasklet);
GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
/* Fake a preemption event; failed of course */
@@ -4596,7 +4597,7 @@ static int reset_virtual_engine(struct intel_gt *gt,
GEM_BUG_ON(rq->fence.error != -EIO);
/* Release our grasp on the engine, letting CS flow again */
- tasklet_enable(&engine->sched.tasklet);
+ tasklet_enable(&se->tasklet);
clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags);
local_bh_enable();
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 3b57fa64867f..f4df22b9095e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -180,8 +180,9 @@ static void schedule_out(struct i915_request *rq)
static void __guc_dequeue(struct intel_engine_cs *engine)
{
+ struct i915_sched_engine * const se =
+ intel_engine_get_scheduler(engine);
struct intel_engine_execlists * const execlists = &engine->execlists;
- struct i915_sched_engine *se = intel_engine_get_scheduler(engine);
struct i915_request **first = execlists->inflight;
struct i915_request ** const last_port = first + execlists->port_mask;
struct i915_request *last = first[0];
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index f24f0dc6d907..75ff3be48f47 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -538,7 +538,6 @@ bool __i915_request_submit(struct i915_request *request)
RQ_TRACE(request, "\n");
- GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&se->lock);
/*
@@ -636,18 +635,12 @@ void i915_request_submit(struct i915_request *request)
void __i915_request_unsubmit(struct i915_request *request)
{
- struct intel_engine_cs *engine = request->engine;
- struct i915_sched_engine *se = intel_engine_get_scheduler(engine);
-
/*
* Only unwind in reverse order, required so that the per-context list
* is kept in seqno/ring order.
*/
RQ_TRACE(request, "\n");
- GEM_BUG_ON(!irqs_disabled());
- lockdep_assert_held(&se->lock);
-
/*
* Before we remove this breadcrumb from the signal list, we have
* to ensure that a concurrent dma_fence_enable_signaling() does not
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 41f99f59814a..ee554cb89854 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -221,7 +221,7 @@ void igt_spinner_fini(struct igt_spinner *spin)
bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
{
if (i915_request_is_ready(rq))
- i915_sched_flush(&rq->engine->sched);
+ intel_engine_flush_scheduler(rq->engine);
return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
rq->fence.seqno),
--
2.20.1