[PATCH 67/69] sched.engine
Chris Wilson <chris@chris-wilson.co.uk>
Mon Feb 1 07:59:47 UTC 2021
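
Move the request's engine backpointer out of i915_request (rq->__engine,
a struct intel_engine_cs *) and into its scheduling node as
rq->sched.engine, a struct i915_sched *. The scheduler entry points
(__i915_request_requeue, __i915_sched_rewind_requests,
__i915_sched_defer_request, i915_sched_suspend_request,
i915_sched_resume_request and their locked variants) now take the
i915_sched directly instead of an intel_engine_cs, the virtual-engine
bond lookup is keyed by the scheduler, and active_engine() reduces to
intel_context_inflight() since the backend already tracks the inflight
engine. Tracepoints and the mock engine no longer rely on rq->engine,
and the per-engine request-pool recycling in i915_fence_release() is
temporarily disabled (XXX) as rq->context may be dangling at that point.

For illustration only, this is the shape of the backpointer-chasing
idiom after the change (an excerpt mirroring the updated
remove_from_engine() and lock_engine_irqsave(), not additional code):

	struct i915_sched *engine, *locked;

	/* rq->sched.engine is only stable under that scheduler's lock */
	locked = READ_ONCE(rq->sched.engine);
	spin_lock_irq(&locked->lock);
	while (unlikely(locked != (engine = READ_ONCE(rq->sched.engine)))) {
		spin_unlock(&locked->lock);
		spin_lock(&engine->lock);
		locked = engine;
	}
	/* ... the request now belongs to "locked" ... */
	spin_unlock_irq(&locked->lock);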
---
drivers/gpu/drm/i915/gem/i915_gem_context.c | 70 +-----------
drivers/gpu/drm/i915/gt/intel_engine_cs.c | 2 +-
.../drm/i915/gt/intel_execlists_submission.c | 30 +++--
.../gpu/drm/i915/gt/intel_ring_scheduler.c | 2 +-
drivers/gpu/drm/i915/gt/mock_engine.c | 13 ++-
drivers/gpu/drm/i915/gt/selftest_execlists.c | 6 +-
.../gpu/drm/i915/gt/uc/intel_guc_submission.c | 2 +-
drivers/gpu/drm/i915/i915_request.c | 36 +++---
drivers/gpu/drm/i915/i915_request.h | 5 +-
drivers/gpu/drm/i915/i915_scheduler.c | 107 ++++++++----------
drivers/gpu/drm/i915/i915_scheduler.h | 15 ++-
drivers/gpu/drm/i915/i915_scheduler_types.h | 2 +
drivers/gpu/drm/i915/i915_trace.h | 12 +-
.../gpu/drm/i915/selftests/i915_scheduler.c | 9 +-
14 files changed, 114 insertions(+), 197 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 2b402af97cec..22c09b31edd3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -408,74 +408,10 @@ static bool __cancel_engine(struct intel_engine_cs *engine)
return intel_engine_pulse(engine) == 0;
}
-static bool
-__active_engine(struct i915_request *rq, struct intel_engine_cs **active)
+static struct intel_engine_cs *
+active_engine(struct intel_context *ce)
{
- struct intel_engine_cs *engine, *locked;
- bool ret = false;
-
- /*
- * Serialise with __i915_request_submit() so that it sees
- * is-banned?, or we know the request is already inflight.
- *
- * Note that rq->engine is unstable, and so we double
- * check that we have acquired the lock on the final engine.
- */
- locked = READ_ONCE(rq->__engine);
- spin_lock_irq(&locked->sched.lock);
- while (unlikely(locked != (engine = READ_ONCE(rq->__engine)))) {
- spin_unlock(&locked->sched.lock);
- locked = engine;
- spin_lock(&locked->sched.lock);
- }
-
- if (i915_request_is_active(rq)) {
- if (!__i915_request_is_complete(rq))
- *active = locked;
- ret = true;
- }
-
- spin_unlock_irq(&locked->sched.lock);
-
- return ret;
-}
-
-static struct intel_engine_cs *active_engine(struct intel_context *ce)
-{
- struct intel_engine_cs *engine = NULL;
- struct i915_request *rq;
-
- if (intel_context_has_inflight(ce))
- return intel_context_inflight(ce);
-
- if (!ce->timeline)
- return NULL;
-
- /*
- * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference
- * to the request to prevent it being transferred to a new timeline
- * (and onto a new timeline->requests list).
- */
- rcu_read_lock();
- list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
- bool found;
-
- /* timeline is already completed upto this point? */
- if (!i915_request_get_rcu(rq))
- break;
-
- /* Check with the backend if the request is inflight */
- found = true;
- if (likely(rcu_access_pointer(rq->timeline) == ce->timeline))
- found = __active_engine(rq, &engine);
-
- i915_request_put(rq);
- if (found)
- break;
- }
- rcu_read_unlock();
-
- return engine;
+ return intel_context_inflight(ce);
}
static void kill_engines(struct i915_gem_engines *engines, bool ban)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index f656d1b848b9..dd27d9b063d4 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -737,9 +737,9 @@ static int measure_breadcrumb_dw(struct intel_context *ce)
if (!frame)
return -ENOMEM;
- frame->rq.__engine = engine;
frame->rq.context = ce;
rcu_assign_pointer(frame->rq.timeline, ce->timeline);
+ frame->rq.sched.engine = se;
frame->ring.vaddr = frame->cs;
frame->ring.size = sizeof(frame->cs);
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 9ceb0b2e6ee1..fc0587e51349 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -177,7 +177,7 @@ struct virtual_engine {
* use one of sibling_mask physical engines.
*/
struct ve_bond {
- const struct intel_engine_cs *master;
+ const struct i915_sched *master;
intel_engine_mask_t sibling_mask;
} *bonds;
unsigned int num_bonds;
@@ -539,10 +539,10 @@ resubmit_virtual_request(struct i915_request *rq, struct virtual_engine *ve)
/* Resubmit the queue in execution order */
spin_lock(&se->lock);
list_for_each_entry_from(pos, &tl->requests, link) {
- if (pos->__engine == &ve->base)
+ if (pos->sched.engine == se)
break;
- __i915_request_requeue(pos, &ve->base);
+ __i915_request_requeue(pos, se);
}
spin_unlock(&se->lock);
@@ -1243,7 +1243,7 @@ static void virtual_requeue(struct intel_engine_cs *engine,
yesno(engine != ve->siblings[0]));
GEM_BUG_ON(!(rq->execution_mask & engine->mask));
- if (__i915_request_requeue(rq, engine)) {
+ if (__i915_request_requeue(rq, &engine->sched)) {
/*
* Only after we confirm that we will submit
* this request (i.e. it has not already
@@ -1263,7 +1263,6 @@ static void virtual_requeue(struct intel_engine_cs *engine,
if (!ve->context.inflight)
WRITE_ONCE(ve->context.inflight, engine);
- GEM_BUG_ON(rq->__engine != engine);
GEM_BUG_ON(ve->siblings[0] != engine);
GEM_BUG_ON(intel_context_inflight(rq->context) != engine);
@@ -1412,7 +1411,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* the preemption, some of the unwound requests may
* complete!
*/
- last = __i915_sched_rewind_requests(engine);
+ last = __i915_sched_rewind_requests(se);
/*
* We want to move the interrupted request to the back of
@@ -1422,7 +1421,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* be run after it again.
*/
if (last && defer)
- __i915_sched_defer_request(engine, last);
+ __i915_sched_defer_request(se, last);
last = NULL;
}
@@ -1969,7 +1968,7 @@ static void execlists_capture_work(struct work_struct *work)
i915_gpu_coredump_put(cap->error);
/* Return this request and all that depend upon it for signaling */
- i915_sched_resume_request(cap->rq->__engine, cap->rq);
+ i915_sched_resume_request(cap->rq->sched.engine, cap->rq);
i915_request_put(cap->rq);
kfree(cap);
@@ -2103,7 +2102,7 @@ static void execlists_capture(struct intel_engine_cs *engine)
* simply hold that request accountable for being non-preemptible
* long enough to force the reset.
*/
- if (!i915_sched_suspend_request(engine, cap->rq))
+ if (!i915_sched_suspend_request(&engine->sched, cap->rq))
goto err_rq;
INIT_WORK(&cap->work, execlists_capture_work);
@@ -2688,7 +2687,7 @@ static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
/* Push back any incomplete requests for replay after the reset. */
rcu_read_lock();
spin_lock_irqsave(&se->lock, flags);
- __i915_sched_rewind_requests(engine);
+ __i915_sched_rewind_requests(se);
spin_unlock_irqrestore(&se->lock, flags);
rcu_read_unlock();
}
@@ -3358,8 +3357,7 @@ static void virtual_submission_tasklet(struct tasklet_struct *t)
}
static struct ve_bond *
-virtual_find_bond(struct virtual_engine *ve,
- const struct intel_engine_cs *master)
+virtual_find_bond(struct virtual_engine *ve, const struct i915_sched *master)
{
int i;
@@ -3378,9 +3376,9 @@ virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
intel_engine_mask_t allowed, exec;
struct ve_bond *bond;
- allowed = ~to_request(signal)->__engine->mask;
+ allowed = ~to_request(signal)->sched.engine->mask;
- bond = virtual_find_bond(ve, to_request(signal)->__engine);
+ bond = virtual_find_bond(ve, to_request(signal)->sched.engine);
if (bond)
allowed &= bond->sibling_mask;
@@ -3562,7 +3560,7 @@ int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
if (n == ve->num_siblings)
return -EINVAL;
- bond = virtual_find_bond(ve, master);
+ bond = virtual_find_bond(ve, &master->sched);
if (bond) {
bond->sibling_mask |= sibling->mask;
return 0;
@@ -3574,7 +3572,7 @@ int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
if (!bond)
return -ENOMEM;
- bond[ve->num_bonds].master = master;
+ bond[ve->num_bonds].master = &master->sched;
bond[ve->num_bonds].sibling_mask = sibling->mask;
ve->bonds = bond;
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c b/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
index 5c35dc2a5ace..7b01237d050f 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
@@ -678,7 +678,7 @@ static void __ring_rewind(struct intel_engine_cs *engine, bool stalled)
rcu_read_lock();
spin_lock_irqsave(&se->lock, flags);
- rq = __i915_sched_rewind_requests(engine);
+ rq = __i915_sched_rewind_requests(se);
spin_unlock_irqrestore(&se->lock, flags);
if (rq && __i915_request_has_started(rq))
__i915_request_reset(rq, stalled);
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index f10778bedd83..9c2cdd8e18ce 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -69,13 +69,14 @@ static struct i915_request *first_request(struct mock_engine *engine)
mock.link);
}
-static void advance(struct i915_request *request)
+static void advance(struct i915_request *request,
+ struct mock_engine *engine)
{
list_del_init(&request->mock.link);
i915_request_mark_complete(request);
GEM_BUG_ON(!i915_request_completed(request));
- intel_engine_signal_breadcrumbs(i915_request_get_engine(request));
+ intel_engine_signal_breadcrumbs(&engine->base);
}
static void hw_delay_complete(struct timer_list *t)
@@ -89,7 +90,7 @@ static void hw_delay_complete(struct timer_list *t)
/* Timer fired, first request is complete */
request = first_request(engine);
if (request)
- advance(request);
+ advance(request, engine);
/*
* Also immediately signal any subsequent 0-delay requests, but
@@ -102,7 +103,7 @@ static void hw_delay_complete(struct timer_list *t)
break;
}
- advance(request);
+ advance(request, engine);
}
spin_unlock_irqrestore(&engine->hw_lock, flags);
@@ -219,7 +220,7 @@ static void mock_submit_request(struct i915_request *request)
mod_timer(&engine->hw_delay,
jiffies + request->mock.delay);
else
- advance(request);
+ advance(request, engine);
}
spin_unlock_irqrestore(&engine->hw_lock, flags);
}
@@ -375,7 +376,7 @@ void mock_engine_flush(struct intel_engine_cs *engine)
spin_lock_irq(&mock->hw_lock);
list_for_each_entry_safe(request, rn, &mock->hw_queue, mock.link)
- advance(request);
+ advance(request, mock);
spin_unlock_irq(&mock->hw_lock);
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index fbb863104ab2..b65269f4da3b 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -609,7 +609,7 @@ static int live_hold_reset(void *arg)
GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
i915_request_get(rq);
- i915_sched_suspend_request(engine, rq);
+ i915_sched_suspend_request(&engine->sched, rq);
GEM_BUG_ON(!i915_request_on_hold(rq));
__intel_engine_reset_bh(engine, NULL);
@@ -631,7 +631,7 @@ static int live_hold_reset(void *arg)
GEM_BUG_ON(!i915_request_on_hold(rq));
/* But is resubmitted on release */
- i915_sched_resume_request(engine, rq);
+ i915_sched_resume_request(&engine->sched, rq);
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("%s: held request did not complete!\n",
engine->name);
@@ -4068,7 +4068,7 @@ static int slicein_virtual_engine(struct intel_gt *gt,
i915_request_add(rq);
if (i915_request_wait(rq, 0, timeout) < 0) {
GEM_TRACE_ERR("%s(%s) failed to slice in virtual request\n",
- __func__, rq->engine->name);
+ __func__, ce->engine->name);
GEM_TRACE_DUMP();
intel_gt_set_wedged(gt);
err = -EIO;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index fe94483750a4..a6320f90bd4d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -304,7 +304,7 @@ static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled)
spin_lock_irqsave(&se->lock, flags);
/* Push back any incomplete requests for replay after the reset. */
- rq = __i915_sched_rewind_requests(engine);
+ rq = __i915_sched_rewind_requests(&engine->sched);
if (!rq)
goto out_unlock;
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 5ebf0e5166f8..4c389bd1af85 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -58,7 +58,7 @@ static struct i915_global_request {
static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
- return dev_name(to_request(fence)->__engine->i915->drm.dev);
+ return dev_name(to_request(fence)->sched.engine->dbg.dev);
}
static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
@@ -153,9 +153,9 @@ static void i915_fence_release(struct dma_fence *fence)
* know that if the rq->execution_mask is a single bit, rq->engine
* can be a physical engine with the exact corresponding mask.
*/
- if (is_power_of_2(rq->execution_mask) &&
- !cmpxchg(&rq->__engine->request_pool, NULL, rq))
- return;
+ if (is_power_of_2(rq->execution_mask) && 0 &&
+ !cmpxchg(&rq->context->engine->request_pool, NULL, rq))
+ return; /* XXX rq->context is a dangling pointer */
kmem_cache_free(global.slab_requests, rq);
}
@@ -246,7 +246,7 @@ static void __i915_request_fill(struct i915_request *rq, u8 val)
static void remove_from_engine(struct i915_request *rq)
{
- struct intel_engine_cs *engine, *locked;
+ struct i915_sched *engine, *locked;
/*
* Virtual engines complicate acquiring the engine timeline lock,
@@ -254,11 +254,11 @@ static void remove_from_engine(struct i915_request *rq)
* engine lock. The simple ploy we use is to take the lock then
* check that the rq still belongs to the newly locked engine.
*/
- locked = READ_ONCE(rq->__engine);
- spin_lock_irq(&locked->sched.lock);
- while (unlikely(locked != (engine = READ_ONCE(rq->__engine)))) {
- spin_unlock(&locked->sched.lock);
- spin_lock(&engine->sched.lock);
+ locked = READ_ONCE(rq->sched.engine);
+ spin_lock_irq(&locked->lock);
+ while (unlikely(locked != (engine = READ_ONCE(rq->sched.engine)))) {
+ spin_unlock(&locked->lock);
+ spin_lock(&engine->lock);
locked = engine;
}
list_del_init(&rq->sched.link);
@@ -269,7 +269,7 @@ static void remove_from_engine(struct i915_request *rq)
/* Prevent further __await_execution() registering a cb, then flush */
set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
- spin_unlock_irq(&locked->sched.lock);
+ spin_unlock_irq(&locked->lock);
__notify_execute_cb_imm(rq);
}
@@ -464,7 +464,7 @@ struct i915_request *i915_request_mark_eio(struct i915_request *rq)
bool __i915_request_submit(struct i915_request *request,
struct intel_engine_cs *engine)
{
- struct i915_sched *se = intel_engine_get_scheduler(engine);
+ struct i915_sched *se = request->sched.engine;
bool result = false;
RQ_TRACE(request, "dl %llu\n", request->sched.deadline);
@@ -784,9 +784,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
}
rq->context = ce;
- rq->__engine = ce->engine;
rq->ring = ce->ring;
- rq->execution_mask = ce->engine->mask;
kref_init(&rq->fence.refcount);
rq->fence.flags = 0;
@@ -812,6 +810,8 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
i915_sw_fence_reinit(&i915_request_get(rq)->semaphore);
i915_sched_node_reinit(&rq->sched);
+ rq->sched.engine = intel_engine_get_scheduler(ce->engine);
+ rq->execution_mask = rq->sched.engine->mask;
/* No zalloc, everything must be cleared after use */
rq->batch = NULL;
@@ -1037,7 +1037,7 @@ emit_semaphore_wait(struct i915_request *to,
struct i915_request *from,
gfp_t gfp)
{
- const intel_engine_mask_t mask = READ_ONCE(from->__engine)->mask;
+ const intel_engine_mask_t mask = READ_ONCE(from->sched.engine)->mask;
struct i915_sw_fence *wait = &to->submit;
if (!intel_context_use_semaphores(to->context))
@@ -1072,7 +1072,7 @@ emit_semaphore_wait(struct i915_request *to,
if (__emit_semaphore_wait(to, from, from->fence.seqno))
goto await_fence;
- to->sched.semaphores |= mask & ~to->__engine->mask;
+ to->sched.semaphores |= mask & ~to->sched.engine->mask;
wait = &to->semaphore;
await_fence:
@@ -1276,7 +1276,7 @@ await_request_submit(struct i915_request *to, struct i915_request *from)
* the waiter to be submitted immediately to the physical engine
* as it may then bypass the virtual request.
*/
- if (to->__engine == READ_ONCE(from->__engine))
+ if (to->sched.engine == READ_ONCE(from->sched.engine))
return i915_sw_fence_await_sw_fence_gfp(&to->submit,
&from->submit,
I915_FENCE_GFP);
@@ -1444,7 +1444,7 @@ static bool in_order_submission(const struct i915_request *prev,
if (likely(prev->context == rq->context))
return true;
- return is_power_of_2(READ_ONCE(prev->__engine)->mask | rq->__engine->mask);
+ return is_power_of_2(READ_ONCE(prev->sched.engine)->mask | rq->sched.engine->mask);
}
static struct i915_request *
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 9c6ea5fa7b13..a3d294f2d068 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -56,7 +56,7 @@ struct i915_capture_list {
#define RQ_TRACE(rq, fmt, ...) do { \
const struct i915_request *rq__ = (rq); \
- ENGINE_TRACE(i915_request_get_engine(rq__), \
+ SCHED_TRACE(rq__->sched.engine, \
"fence " RQ_FMT ", current %d " fmt, \
RQ_ARG(rq__), hwsp_seqno(rq__), ##__VA_ARGS__); \
} while (0)
@@ -176,7 +176,6 @@ struct i915_request {
* i915_request_free() will then decrement the refcount on the
* context.
*/
- struct intel_engine_cs *__engine;
struct intel_context *context;
struct intel_ring *ring;
struct intel_timeline __rcu *timeline;
@@ -597,7 +596,7 @@ static inline void i915_request_clear_hold(struct i915_request *rq)
static inline struct i915_sched *
i915_request_get_scheduler(const struct i915_request *rq)
{
- return intel_engine_get_scheduler(rq->__engine);
+ return rq->sched.engine;
}
static inline struct intel_engine_cs *
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 233b50427e80..93fb6fd959c6 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -307,22 +307,22 @@ static void __ipi_add(struct i915_request *rq)
/*
* Virtual engines complicate acquiring the engine timeline lock,
- * as their rq->engine pointer is not stable until under that
+ * as their rq->sched.engine pointer is not stable until under that
* engine lock. The simple ploy we use is to take the lock then
* check that the rq still belongs to the newly locked engine.
*/
#define lock_engine_irqsave(rq, flags) ({ \
struct i915_request * const rq__ = (rq); \
- struct intel_engine_cs *engine__ = READ_ONCE(rq__->__engine); \
+ struct i915_sched *se__ = READ_ONCE(rq__->sched.engine); \
\
- spin_lock_irqsave(&engine__->sched.lock, (flags)); \
- while (engine__ != READ_ONCE((rq__)->__engine)) { \
- spin_unlock(&engine__->sched.lock); \
- engine__ = READ_ONCE(rq__->__engine); \
- spin_lock(&engine__->sched.lock); \
+ spin_lock_irqsave(&se__->lock, (flags)); \
+ while (se__ != READ_ONCE((rq__)->sched.engine)) { \
+ spin_unlock(&se__->lock); \
+ se__ = READ_ONCE(rq__->sched.engine); \
+ spin_lock(&se__->lock); \
} \
\
- engine__; \
+ se__; \
})
static const struct i915_request *
@@ -574,8 +574,7 @@ static bool is_first_priolist(const struct i915_sched *se,
static bool __i915_request_set_deadline(struct i915_request *rq, u64 deadline)
{
- struct intel_engine_cs *engine = rq->__engine;
- struct i915_sched *se = intel_engine_get_scheduler(engine);
+ struct i915_sched *se = rq->sched.engine;
struct list_head *pos = &rq->sched.signalers_list;
struct list_head *plist;
@@ -601,7 +600,7 @@ static bool __i915_request_set_deadline(struct i915_request *rq, u64 deadline)
if (__i915_request_is_complete(s))
continue;
- if (s->__engine != engine) {
+ if (s->sched.engine != se) {
ipi_deadline(s, deadline);
continue;
}
@@ -622,7 +621,6 @@ static bool __i915_request_set_deadline(struct i915_request *rq, u64 deadline)
* any preemption required, be dealt with upon submission.
* See engine->submit_request()
*/
- GEM_BUG_ON(i915_request_get_engine(rq) != engine);
if (i915_request_in_priority_queue(rq))
remove_from_priolist(se, rq, plist, true);
} while ((rq = stack_pop(rq, &pos)));
@@ -632,14 +630,14 @@ static bool __i915_request_set_deadline(struct i915_request *rq, u64 deadline)
void i915_request_set_deadline(struct i915_request *rq, u64 deadline)
{
- struct intel_engine_cs *engine;
+ struct i915_sched *se;
unsigned long flags;
if (deadline >= rq_deadline(rq))
return;
- engine = lock_engine_irqsave(rq, flags);
- if (!i915_sched_is_active(&engine->sched))
+ se = lock_engine_irqsave(rq, flags);
+ if (!i915_sched_is_active(se))
goto unlock;
if (deadline >= rq_deadline(rq))
@@ -650,12 +648,12 @@ void i915_request_set_deadline(struct i915_request *rq, u64 deadline)
rcu_read_lock();
if (__i915_request_set_deadline(rq, deadline))
- i915_sched_kick(&engine->sched);
+ i915_sched_kick(se);
rcu_read_unlock();
GEM_BUG_ON(rq_deadline(rq) != deadline);
unlock:
- spin_unlock_irqrestore(&engine->sched.lock, flags);
+ spin_unlock_irqrestore(&se->lock, flags);
}
static u64 prio_slice(int prio)
@@ -813,12 +811,11 @@ static void ipi_priority(struct i915_request *rq, int prio)
static bool __i915_request_set_priority(struct i915_request *rq, int prio)
{
- struct intel_engine_cs *engine = rq->__engine;
+ struct i915_sched *se = rq->sched.engine;
struct list_head *pos = &rq->sched.signalers_list;
bool kick = false;
- SCHED_TRACE(&engine->sched, "PI for " RQ_FMT ", prio:%d\n",
- RQ_ARG(rq), prio);
+ SCHED_TRACE(se, "PI for " RQ_FMT ", prio:%d\n", RQ_ARG(rq), prio);
/*
* Recursively bump all dependent priorities to match the new request.
@@ -854,7 +851,7 @@ static bool __i915_request_set_priority(struct i915_request *rq, int prio)
if (__i915_request_is_complete(s))
continue;
- if (s->__engine != engine) {
+ if (s->sched.engine != se) {
ipi_priority(s, prio);
continue;
}
@@ -877,7 +874,7 @@ static bool __i915_request_set_priority(struct i915_request *rq, int prio)
* any preemption required, be dealt with upon submission.
* See engine->submit_request()
*/
- GEM_BUG_ON(rq->__engine != engine);
+ GEM_BUG_ON(rq->sched.engine != se);
if (i915_request_is_ready(rq) &&
set_earliest_deadline(rq, rq_deadline(rq)))
kick = true;
@@ -893,7 +890,7 @@ static bool __i915_request_set_priority(struct i915_request *rq, int prio)
void i915_request_set_priority(struct i915_request *rq, int prio)
{
- struct intel_engine_cs *engine;
+ struct i915_sched *se;
unsigned long flags;
if (prio <= rq_prio(rq))
@@ -931,7 +928,7 @@ void i915_request_set_priority(struct i915_request *rq, int prio)
return;
}
- engine = lock_engine_irqsave(rq, flags);
+ se = lock_engine_irqsave(rq, flags);
if (prio <= rq_prio(rq))
goto unlock;
@@ -940,20 +937,19 @@ void i915_request_set_priority(struct i915_request *rq, int prio)
rcu_read_lock();
if (__i915_request_set_priority(rq, prio))
- i915_sched_kick(&engine->sched);
+ i915_sched_kick(se);
rcu_read_unlock();
GEM_BUG_ON(rq_prio(rq) != prio);
unlock:
- spin_unlock_irqrestore(&engine->sched.lock, flags);
+ spin_unlock_irqrestore(&se->lock, flags);
}
-static void __defer_request(struct intel_engine_cs *engine,
+static void __defer_request(struct i915_sched *se,
struct i915_request *rq,
const u64 deadline)
{
struct list_head *pos = &rq->sched.waiters_list;
- struct i915_sched *se = intel_engine_get_scheduler(engine);
struct i915_request *rn;
LIST_HEAD(dfs);
@@ -979,7 +975,7 @@ static void __defer_request(struct intel_engine_cs *engine,
continue;
/* Leave semaphores spinning on the other engines */
- if (w->__engine != engine)
+ if (w->sched.engine != se)
continue;
/* No waiter should start before its signaler */
@@ -1014,7 +1010,7 @@ static void __defer_request(struct intel_engine_cs *engine,
WRITE_ONCE(rq->sched.deadline, deadline);
/* Note list is reversed for waiters wrt signal hierarchy */
- GEM_BUG_ON(rq->__engine != engine);
+ GEM_BUG_ON(rq->sched.engine != se);
remove_from_priolist(se, rq, &dfs, false);
/* Track our visit, and prevent duplicate processing */
@@ -1028,10 +1024,9 @@ static void __defer_request(struct intel_engine_cs *engine,
}
}
-void __i915_sched_defer_request(struct intel_engine_cs *engine,
- struct i915_request *rq)
+void __i915_sched_defer_request(struct i915_sched *se, struct i915_request *rq)
{
- __defer_request(engine, rq,
+ __defer_request(se, rq,
max(rq_deadline(rq),
next_virtual_deadline(adj_prio(rq))));
}
@@ -1056,7 +1051,7 @@ static bool hold_request(const struct i915_request *rq)
const struct i915_request *s =
container_of(p->signaler, typeof(*s), sched);
- if (s->__engine != rq->__engine)
+ if (s->sched.engine != rq->sched.engine)
continue;
result = i915_request_on_hold(s);
@@ -1075,21 +1070,18 @@ static bool ancestor_on_hold(const struct i915_sched *se,
return unlikely(!list_empty(&se->hold)) && hold_request(rq);
}
-bool __i915_request_requeue(struct i915_request *rq,
- struct intel_engine_cs *engine)
+bool __i915_request_requeue(struct i915_request *rq, struct i915_sched *se)
{
- struct i915_sched *se = intel_engine_get_scheduler(engine);
-
RQ_TRACE(rq, "transfer from %s to %s\n",
- rq->engine->name, engine->name);
+ rq->sched.engine->dbg.name, se->dbg.name);
lockdep_assert_held(&se->lock);
lockdep_assert_held(&i915_request_get_scheduler(rq)->lock);
GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags));
- GEM_BUG_ON(rq->__engine == engine);
+ GEM_BUG_ON(rq->sched.engine == se);
remove_from_priolist(i915_request_get_scheduler(rq), rq, NULL, false);
- WRITE_ONCE(rq->__engine, engine);
+ WRITE_ONCE(rq->sched.engine, se);
if (__i915_request_is_complete(rq)) {
clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
@@ -1108,7 +1100,7 @@ bool __i915_request_requeue(struct i915_request *rq,
/* Maintain request ordering wrt to existing on target */
__i915_request_set_deadline(rq, deadline);
if (!list_empty(&rq->sched.waiters_list))
- __defer_request(engine, rq, deadline);
+ __defer_request(se, rq, deadline);
GEM_BUG_ON(rq_deadline(rq) == I915_DEADLINE_NEVER);
}
@@ -1119,7 +1111,7 @@ bool __i915_request_requeue(struct i915_request *rq,
void i915_request_enqueue(struct i915_request *rq)
{
- struct i915_sched *se = i915_request_get_scheduler(rq);
+ struct i915_sched *se = rq->sched.engine;
u64 dl = earliest_deadline(rq);
unsigned long flags;
bool kick = false;
@@ -1148,10 +1140,8 @@ void i915_request_enqueue(struct i915_request *rq)
i915_sched_kick(se);
}
-struct i915_request *
-__i915_sched_rewind_requests(struct intel_engine_cs *engine)
+struct i915_request *__i915_sched_rewind_requests(struct i915_sched *se)
{
- struct i915_sched *se = intel_engine_get_scheduler(engine);
struct i915_request *rq, *rn, *active = NULL;
u64 deadline = I915_DEADLINE_NEVER;
struct list_head *pl;
@@ -1199,14 +1189,13 @@ __i915_sched_rewind_requests(struct intel_engine_cs *engine)
return active;
}
-bool __i915_sched_suspend_request(struct intel_engine_cs *engine,
+bool __i915_sched_suspend_request(struct i915_sched *se,
struct i915_request *rq)
{
- struct i915_sched *se = intel_engine_get_scheduler(engine);
LIST_HEAD(list);
lockdep_assert_held(&se->lock);
- GEM_BUG_ON(rq->__engine != engine);
+ GEM_BUG_ON(rq->sched.engine != se);
if (__i915_request_is_complete(rq)) /* too late! */
return false;
@@ -1241,7 +1230,7 @@ bool __i915_sched_suspend_request(struct intel_engine_cs *engine,
continue;
/* Leave semaphores spinning on the other engines */
- if (w->__engine != engine)
+ if (w->sched.engine != se)
continue;
if (!i915_request_is_ready(w))
@@ -1264,26 +1253,22 @@ bool __i915_sched_suspend_request(struct intel_engine_cs *engine,
return true;
}
-bool i915_sched_suspend_request(struct intel_engine_cs *engine,
- struct i915_request *rq)
+bool i915_sched_suspend_request(struct i915_sched *se, struct i915_request *rq)
{
- struct i915_sched *se = intel_engine_get_scheduler(engine);
bool result;
if (i915_request_on_hold(rq))
return false;
spin_lock_irq(&se->lock);
- result = __i915_sched_suspend_request(engine, rq);
+ result = __i915_sched_suspend_request(se, rq);
spin_unlock_irq(&se->lock);
return result;
}
-void __i915_sched_resume_request(struct intel_engine_cs *engine,
- struct i915_request *rq)
+void __i915_sched_resume_request(struct i915_sched *se, struct i915_request *rq)
{
- struct i915_sched *se = intel_engine_get_scheduler(engine);
LIST_HEAD(list);
bool submit = false;
@@ -1323,7 +1308,7 @@ void __i915_sched_resume_request(struct intel_engine_cs *engine,
if (rq->fence.error)
i915_request_set_error_once(w, rq->fence.error);
- if (w->__engine != engine)
+ if (w->sched.engine != se)
continue;
/* We also treat the on-hold status as a visited bit */
@@ -1353,13 +1338,11 @@ void i915_request_update_deadline(struct i915_request *rq)
i915_request_set_deadline(rq, earliest_deadline(rq));
}
-void i915_sched_resume_request(struct intel_engine_cs *engine,
+void i915_sched_resume_request(struct i915_sched *se,
struct i915_request *rq)
{
- struct i915_sched *se = intel_engine_get_scheduler(engine);
-
spin_lock_irq(&se->lock);
- __i915_sched_resume_request(engine, rq);
+ __i915_sched_resume_request(se, rq);
spin_unlock_irq(&se->lock);
}
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index c5612dd4a081..c7e0f479e8df 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -15,7 +15,6 @@
#include "i915_request.h"
struct drm_printer;
-struct intel_engine_cs;
#define SCHED_TRACE(se, fmt, ...) do { \
const struct i915_sched *se__ __maybe_unused = (se); \
@@ -53,21 +52,21 @@ void i915_request_update_deadline(struct i915_request *request);
void i915_request_enqueue(struct i915_request *request);
bool __i915_request_requeue(struct i915_request *rq,
- struct intel_engine_cs *engine);
+ struct i915_sched *se);
struct i915_request *
-__i915_sched_rewind_requests(struct intel_engine_cs *engine);
-void __i915_sched_defer_request(struct intel_engine_cs *engine,
+__i915_sched_rewind_requests(struct i915_sched *engine);
+void __i915_sched_defer_request(struct i915_sched *engine,
struct i915_request *request);
-bool __i915_sched_suspend_request(struct intel_engine_cs *engine,
+bool __i915_sched_suspend_request(struct i915_sched *engine,
struct i915_request *rq);
-void __i915_sched_resume_request(struct intel_engine_cs *engine,
+void __i915_sched_resume_request(struct i915_sched *engine,
struct i915_request *request);
-bool i915_sched_suspend_request(struct intel_engine_cs *engine,
+bool i915_sched_suspend_request(struct i915_sched *engine,
struct i915_request *request);
-void i915_sched_resume_request(struct intel_engine_cs *engine,
+void i915_sched_resume_request(struct i915_sched *engine,
struct i915_request *rq);
static inline u64 i915_sched_to_ticks(ktime_t kt)
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index 34c46c526f74..1b13642f5458 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -142,6 +142,8 @@ struct i915_sched_attr {
* others.
*/
struct i915_sched_node {
+ struct i915_sched *engine;
+
spinlock_t lock; /* protect the lists */
struct list_head signalers_list; /* those before us, we depend upon */
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index a4cda440f739..0bfb5b01291a 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -804,9 +804,9 @@ TRACE_EVENT(i915_request_in,
),
TP_fast_assign(
- __entry->dev = rq->engine->i915->drm.primary->index;
- __entry->class = rq->engine->uabi_class;
- __entry->instance = rq->engine->uabi_instance;
+ __entry->dev = rq->context->engine->i915->drm.primary->index;
+ __entry->class = rq->context->engine->uabi_class;
+ __entry->instance = rq->context->engine->uabi_instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->prio = rq->sched.attr.priority;
@@ -833,9 +833,9 @@ TRACE_EVENT(i915_request_out,
),
TP_fast_assign(
- __entry->dev = rq->engine->i915->drm.primary->index;
- __entry->class = rq->engine->uabi_class;
- __entry->instance = rq->engine->uabi_instance;
+ __entry->dev = rq->context->engine->i915->drm.primary->index;
+ __entry->class = rq->context->engine->uabi_class;
+ __entry->instance = rq->context->engine->uabi_instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->completed = i915_request_completed(rq);
diff --git a/drivers/gpu/drm/i915/selftests/i915_scheduler.c b/drivers/gpu/drm/i915/selftests/i915_scheduler.c
index 0180a21fa1a7..d1ce2e05b6b9 100644
--- a/drivers/gpu/drm/i915/selftests/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/selftests/i915_scheduler.c
@@ -606,14 +606,13 @@ static int igt_deadline_chains(void *arg)
static bool igt_defer(struct i915_request *rq, unsigned long v, unsigned long e)
{
- struct intel_engine_cs *engine = rq->__engine;
- struct i915_sched *se = intel_engine_get_scheduler(engine);
+ struct i915_sched *se = rq->sched.engine;
/* XXX No generic means to unwind incomplete requests yet */
if (!i915_request_in_priority_queue(rq))
return false;
- if (!intel_engine_has_preemption(engine))
+ if (!intel_engine_has_preemption(rq->context->engine))
return false;
spin_lock_irq(&se->lock);
@@ -624,10 +623,10 @@ static bool igt_defer(struct i915_request *rq, unsigned long v, unsigned long e)
/* Then the very first request must be the one everyone depends on */
rq = list_first_entry(lookup_priolist(se, 0), typeof(*rq), sched.link);
- GEM_BUG_ON(rq->__engine != engine);
+ GEM_BUG_ON(rq->sched.engine != se);
/* Deferring the first request will then have to defer all requests */
- __i915_sched_defer_request(engine, rq);
+ __i915_sched_defer_request(se, rq);
spin_unlock_irq(&se->lock);
return true;
--
2.20.1