[PATCH 65/73] el
Chris Wilson
chris at chris-wilson.co.uk
Tue Jan 12 13:58:08 UTC 2021
Shorten the local shorthand for the execlists submission state from
'execlists' to 'el', pulling many over-long lines back within bounds,
and in __intel_context_active() take the ring/state active references
unconditionally with __i915_active_acquire() rather than asserting
i915_active_acquire_if_busy(): everything has already been activated
by intel_context_pre_pin(), so the conditional acquire cannot fail.
---
drivers/gpu/drm/i915/gt/intel_context.c | 4 +-
.../drm/i915/gt/intel_execlists_submission.c | 233 +++++++++---------
2 files changed, 116 insertions(+), 121 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index d01678c26a91..5b886828529d 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -351,13 +351,13 @@ static int __intel_context_active(struct i915_active *active)
intel_context_get(ce);
/* everything should already be activated by intel_context_pre_pin() */
- GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->ring->vma->active));
+ __i915_active_acquire(&ce->ring->vma->active);
__intel_ring_pin(ce->ring);
__intel_timeline_pin(ce->timeline);
if (ce->state) {
- GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->state->active));
+ __i915_active_acquire(&ce->state->active);
__i915_vma_pin(ce->state);
i915_vma_make_unshrinkable(ce->state);
}
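For reference, the distinction between the two acquire primitives is
whether they tolerate an idle ref. Roughly, paraphrasing the
i915_active implementation of this era (a sketch, not verbatim; see
i915_active.[ch] for the real definitions):

	static inline void __i915_active_acquire(struct i915_active *ref)
	{
		/* Caller guarantees the ref is already active: just add a count. */
		GEM_BUG_ON(!atomic_read(&ref->count));
		atomic_inc(&ref->count);
	}

	bool i915_active_acquire_if_busy(struct i915_active *ref)
	{
		/* Succeeds only while the ref is busy, i.e. count is non-zero. */
		return atomic_add_unless(&ref->count, 1, 0);
	}

As intel_context_pre_pin() has already activated the ring, timeline and
state, the conditional form can never fail here; the unconditional
acquire simply drops the redundant busy check along with the WARN.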
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 8fb8ba5654b1..507c0d9af7a1 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -679,14 +679,14 @@ static u64 execlists_update_context(struct i915_request *rq)
return desc;
}
-static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port)
+static inline void write_desc(struct intel_engine_execlists *el, u64 desc, u32 port)
{
- if (execlists->ctrl_reg) {
- writel(lower_32_bits(desc), execlists->submit_reg + port * 2);
- writel(upper_32_bits(desc), execlists->submit_reg + port * 2 + 1);
+ if (el->ctrl_reg) {
+ writel(lower_32_bits(desc), el->submit_reg + port * 2);
+ writel(upper_32_bits(desc), el->submit_reg + port * 2 + 1);
} else {
- writel(upper_32_bits(desc), execlists->submit_reg);
- writel(lower_32_bits(desc), execlists->submit_reg);
+ writel(upper_32_bits(desc), el->submit_reg);
+ writel(lower_32_bits(desc), el->submit_reg);
}
}
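Note the asymmetry between the two submission paths: with ELSQ (Gen11+)
each port is a plain pair of dword writes and nothing is loaded until
EL_CTRL_LOAD is written separately, whereas on the legacy ELSP path the
mmio writes themselves carry the protocol. For a two-port submission
with descriptors d0 and d1 (hypothetical values; the caller below
writes the ports in reverse order), the legacy sequence is:

	writel(upper_32_bits(d1), el->submit_reg); /* port 1 first */
	writel(lower_32_bits(d1), el->submit_reg);
	writel(upper_32_bits(d0), el->submit_reg);
	writel(lower_32_bits(d0), el->submit_reg); /* last write triggers the load */

which is why write_desc() must emit the upper dword before the lower on
that path.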
@@ -709,12 +709,12 @@ dump_port(char *buf, int buflen, const char *prefix, struct i915_request *rq)
}
static __maybe_unused void
-trace_ports(const struct intel_engine_execlists *execlists,
+trace_ports(const struct intel_engine_execlists *el,
const char *msg,
struct i915_request * const *ports)
{
const struct intel_engine_cs *engine =
- container_of(execlists, typeof(*engine), execlists);
+ container_of(el, typeof(*engine), execlists);
char __maybe_unused p0[40], p1[40];
if (!ports[0])
@@ -732,35 +732,35 @@ reset_in_progress(const struct intel_engine_cs *engine)
}
static __maybe_unused bool
-assert_pending_valid(const struct intel_engine_execlists *execlists,
+assert_pending_valid(const struct intel_engine_execlists *el,
const char *msg)
{
struct intel_engine_cs *engine =
- container_of(execlists, typeof(*engine), execlists);
+ container_of(el, typeof(*engine), execlists);
struct i915_request * const *port, *rq;
struct intel_context *ce = NULL;
bool sentinel = false;
u32 ccid = -1;
- trace_ports(execlists, msg, execlists->pending);
+ trace_ports(el, msg, el->pending);
/* We may be messing around with the lists during reset, lalala */
if (reset_in_progress(engine))
return true;
- if (!execlists->pending[0]) {
+ if (!el->pending[0]) {
GEM_TRACE_ERR("%s: Nothing pending for promotion!\n",
engine->name);
return false;
}
- if (execlists->pending[execlists_num_ports(execlists)]) {
+ if (el->pending[execlists_num_ports(el)]) {
GEM_TRACE_ERR("%s: Excess pending[%d] for promotion!\n",
- engine->name, execlists_num_ports(execlists));
+ engine->name, execlists_num_ports(el));
return false;
}
- for (port = execlists->pending; (rq = *port); port++) {
+ for (port = el->pending; (rq = *port); port++) {
unsigned long flags;
bool ok = true;
@@ -771,7 +771,7 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
GEM_TRACE_ERR("%s: Dup context:%llx in pending[%zd]\n",
engine->name,
ce->timeline->fence_context,
- port - execlists->pending);
+ port - el->pending);
return false;
}
ce = rq->context;
@@ -780,7 +780,7 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
GEM_TRACE_ERR("%s: Dup ccid:%x context:%llx in pending[%zd]\n",
engine->name,
ccid, ce->timeline->fence_context,
- port - execlists->pending);
+ port - el->pending);
return false;
}
ccid = ce->lrc.ccid;
@@ -794,7 +794,7 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
GEM_TRACE_ERR("%s: context:%llx after sentinel in pending[%zd]\n",
engine->name,
ce->timeline->fence_context,
- port - execlists->pending);
+ port - el->pending);
return false;
}
sentinel = i915_request_has_sentinel(rq);
@@ -804,12 +804,11 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
* that they are never stuck behind a hog and can be immediately
* transferred onto the next idle engine.
*/
- if (rq->execution_mask != engine->mask &&
- port != execlists->pending) {
+ if (rq->execution_mask != engine->mask && port != el->pending) {
GEM_TRACE_ERR("%s: virtual engine:%llx not in prime position[%zd]\n",
engine->name,
ce->timeline->fence_context,
- port - execlists->pending);
+ port - el->pending);
return false;
}
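Spelling out the invariant: a virtual request (one whose execution_mask
spans several engines) is only ever allowed in the first pending slot,
so it begins executing immediately and is never queued behind another
context on this engine. With hypothetical requests rqA (ordinary) and
rqV (virtual):

	pending[] = { rqV, rqA, NULL };	/* ok: the virtual request leads */
	pending[] = { rqA, rqV, NULL };	/* invalid: rqV stuck behind rqA */

The second layout would defeat the point of a virtual engine, which is
to run on whichever physical engine idles first.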
@@ -825,7 +824,7 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
GEM_TRACE_ERR("%s: Inactive context:%llx in pending[%zd]\n",
engine->name,
ce->timeline->fence_context,
- port - execlists->pending);
+ port - el->pending);
ok = false;
goto unlock;
}
@@ -834,7 +833,7 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
GEM_TRACE_ERR("%s: Unpinned context:%llx in pending[%zd]\n",
engine->name,
ce->timeline->fence_context,
- port - execlists->pending);
+ port - el->pending);
ok = false;
goto unlock;
}
@@ -843,7 +842,7 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
GEM_TRACE_ERR("%s: Unpinned ring:%llx in pending[%zd]\n",
engine->name,
ce->timeline->fence_context,
- port - execlists->pending);
+ port - el->pending);
ok = false;
goto unlock;
}
@@ -859,10 +858,10 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
static void execlists_submit_ports(struct intel_engine_cs *engine)
{
- struct intel_engine_execlists *execlists = &engine->execlists;
+ struct intel_engine_execlists *el = &engine->execlists;
unsigned int n;
- GEM_BUG_ON(!assert_pending_valid(execlists, "submit"));
+ GEM_BUG_ON(!assert_pending_valid(el, "submit"));
/*
* We can skip acquiring intel_runtime_pm_get() here as it was taken
@@ -880,17 +879,15 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
* currently ensured by the fact that we always write the same number
* of elsq entries, keep this in mind before changing the loop below.
*/
- for (n = execlists_num_ports(execlists); n--; ) {
- struct i915_request *rq = execlists->pending[n];
+ for (n = execlists_num_ports(el); n--; ) {
+ struct i915_request *rq = el->pending[n];
- write_desc(execlists,
- rq ? execlists_update_context(rq) : 0,
- n);
+ write_desc(el, rq ? execlists_update_context(rq) : 0, n);
}
/* we need to manually load the submit queue */
- if (execlists->ctrl_reg)
- writel(EL_CTRL_LOAD, execlists->ctrl_reg);
+ if (el->ctrl_reg)
+ writel(EL_CTRL_LOAD, el->ctrl_reg);
}
static bool ctx_single_port_submission(const struct intel_context *ce)
@@ -1125,9 +1122,9 @@ static void start_timeslice(struct intel_engine_cs *engine)
set_timer_ms(&el->timer, duration);
}
-static void record_preemption(struct intel_engine_execlists *execlists)
+static void record_preemption(struct intel_engine_execlists *el)
{
- (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
+ (void)I915_SELFTEST_ONLY(el->preempt_hang.count++);
}
static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
@@ -1274,9 +1271,9 @@ static void virtual_requeue(struct intel_engine_cs *engine,
static void execlists_dequeue(struct intel_engine_cs *engine)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct i915_request **port = execlists->pending;
- struct i915_request ** const last_port = port + execlists->port_mask;
+ struct intel_engine_execlists * const el = &engine->execlists;
+ struct i915_request **port = el->pending;
+ struct i915_request ** const last_port = port + el->port_mask;
struct i915_request *last, * const *active;
struct i915_priolist *pl;
bool submit = false;
@@ -1314,7 +1311,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* of trouble.
*
*/
- active = execlists->active;
+ active = el->active;
while ((last = *active) && completed(last))
active++;
@@ -1328,7 +1325,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
last->fence.seqno,
rq_deadline(last),
rq_prio(last));
- record_preemption(execlists);
+ record_preemption(el);
/*
* Don't let the RING_HEAD advance past the breadcrumb
@@ -1350,11 +1347,11 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
} else if (timeslice_expired(engine, last)) {
ENGINE_TRACE(engine,
"expired:%s last=%llx:%llu, deadline=%llu, now=%llu, yield?=%s\n",
- yesno(timer_expired(&execlists->timer)),
+ yesno(timer_expired(&el->timer)),
last->fence.context, last->fence.seqno,
rq_deadline(last),
i915_sched_to_ticks(ktime_get()),
- yesno(timeslice_yield(execlists, last)));
+ yesno(timeslice_yield(el, last)));
/*
* Consume this timeslice; ensure we start a new one.
@@ -1372,7 +1369,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* consumption of this timeslice, if we submit the
* same context again, grant it a full timeslice.
*/
- cancel_timer(&execlists->timer);
+ cancel_timer(&el->timer);
ring_set_paused(engine, 1);
defer_active(engine);
@@ -1411,7 +1408,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
}
}
- if (!RB_EMPTY_ROOT(&execlists->virtual.rb_root))
+ if (!RB_EMPTY_ROOT(&el->virtual.rb_root))
virtual_requeue(engine, last);
for_each_priolist(pl, &engine->active.queue) {
@@ -1510,21 +1507,19 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* of ordered contexts.
*/
if (submit &&
- memcmp(active,
- execlists->pending,
- (port - execlists->pending) * sizeof(*port))) {
+ memcmp(active, el->pending, (port - el->pending) * sizeof(*port))) {
*port = NULL;
- while (port-- != execlists->pending)
- execlists_schedule_in(*port, port - execlists->pending);
+ while (port-- != el->pending)
+ execlists_schedule_in(*port, port - el->pending);
- WRITE_ONCE(execlists->yield, -1);
+ WRITE_ONCE(el->yield, -1);
set_preempt_timeout(engine, *active);
execlists_submit_ports(engine);
} else {
ring_set_paused(engine, 0);
- while (port-- != execlists->pending)
+ while (port-- != el->pending)
i915_request_put(*port);
- *execlists->pending = NULL;
+ *el->pending = NULL;
}
}
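The tail of execlists_dequeue() only commits a new ELSP when the
freshly built pending[] actually differs from what the hardware is
already running; otherwise every speculative reference taken while
filling the array is unwound. A hypothetical trace (rqA stands in for
any request):

	active[]  = { rqA, NULL };	/* currently on the hardware */
	pending[] = { rqA, NULL };	/* what dequeue just rebuilt */
	/* memcmp() over (port - pending) slots sees no difference, so: */
	ring_set_paused(engine, 0);
	i915_request_put(rqA);		/* drop the speculative reference */
	*el->pending = NULL;		/* no ELSP write, no re-submission */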
@@ -1549,27 +1544,27 @@ copy_ports(struct i915_request **dst, struct i915_request **src, int count)
}
static struct i915_request **
-cancel_port_requests(struct intel_engine_execlists * const execlists,
+cancel_port_requests(struct intel_engine_execlists * const el,
struct i915_request **inactive)
{
struct i915_request * const *port;
- for (port = execlists->pending; *port; port++)
+ for (port = el->pending; *port; port++)
*inactive++ = *port;
- clear_ports(execlists->pending, ARRAY_SIZE(execlists->pending));
+ clear_ports(el->pending, ARRAY_SIZE(el->pending));
/* Mark the end of active before we overwrite *active */
- for (port = xchg(&execlists->active, execlists->pending); *port; port++)
+ for (port = xchg(&el->active, el->pending); *port; port++)
*inactive++ = *port;
- clear_ports(execlists->inflight, ARRAY_SIZE(execlists->inflight));
+ clear_ports(el->inflight, ARRAY_SIZE(el->inflight));
smp_wmb(); /* complete the seqlock for execlists_active() */
- WRITE_ONCE(execlists->active, execlists->inflight);
+ WRITE_ONCE(el->active, el->inflight);
/* Having cancelled all outstanding process_csb(), stop their timers */
- GEM_BUG_ON(execlists->pending[0]);
- cancel_timer(&execlists->timer);
- cancel_timer(&execlists->preempt);
+ GEM_BUG_ON(el->pending[0]);
+ cancel_timer(&el->timer);
+ cancel_timer(&el->preempt);
return inactive;
}
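The WRITE_ONCE()/smp_wmb() pairs here form the writer half of the
seqlock protecting concurrent readers of the port arrays; the reader
half, execlists_active(), retries until it sees a stable pointer.
Paraphrased sketch of the read side from this era (not verbatim):

	static inline struct i915_request *
	execlists_active(const struct intel_engine_execlists *execlists)
	{
		struct i915_request * const *cur, * const *old, *active;

		cur = READ_ONCE(execlists->active);
		smp_rmb(); /* pairs with the overwrite of execlists->active */
		do {
			old = cur;

			active = READ_ONCE(*cur);
			cur = READ_ONCE(execlists->active);

			smp_rmb(); /* and complete the seqlock retry */
		} while (unlikely(cur != old));

		return active;
	}

Pointing active at the just-cleared pending[] before wiping inflight[]
means a reader can never observe a half-cleared inflight array.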
@@ -1708,9 +1703,9 @@ static void new_timeslice(struct intel_engine_execlists *el)
static struct i915_request **
process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- u64 * const buf = execlists->csb_status;
- const u8 num_entries = execlists->csb_size;
+ struct intel_engine_execlists * const el = &engine->execlists;
+ u64 * const buf = el->csb_status;
+ const u8 num_entries = el->csb_size;
struct i915_request **prev;
u8 head, tail;
@@ -1733,8 +1728,8 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
* to use explicit shifting and masking, and probably bifurcating
* the code to handle the legacy mmio read).
*/
- head = execlists->csb_head;
- tail = READ_ONCE(*execlists->csb_write);
+ head = el->csb_head;
+ tail = READ_ONCE(*el->csb_write);
if (unlikely(head == tail))
return inactive;
@@ -1754,7 +1749,7 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
* simplest way is to stop processing the event queue and force the
* engine to reset.
*/
- execlists->csb_head = tail;
+ el->csb_head = tail;
ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
/*
@@ -1805,44 +1800,44 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
else
promote = gen8_csb_parse(csb);
if (promote) {
- struct i915_request * const *old = execlists->active;
+ struct i915_request * const *old = el->active;
- if (GEM_WARN_ON(!*execlists->pending)) {
- execlists->error_interrupt |= ERROR_CSB;
+ if (GEM_WARN_ON(!*el->pending)) {
+ el->error_interrupt |= ERROR_CSB;
break;
}
ring_set_paused(engine, 0);
/* Point active to the new ELSP; prevent overwriting */
- WRITE_ONCE(execlists->active, execlists->pending);
+ WRITE_ONCE(el->active, el->pending);
smp_wmb(); /* notify execlists_active() */
/* cancel old inflight, prepare for switch */
- trace_ports(execlists, "preempted", old);
+ trace_ports(el, "preempted", old);
while (*old)
*inactive++ = *old++;
/* switch pending to inflight */
- GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
- copy_ports(execlists->inflight,
- execlists->pending,
- execlists_num_ports(execlists));
+ GEM_BUG_ON(!assert_pending_valid(el, "promote"));
+ copy_ports(el->inflight,
+ el->pending,
+ execlists_num_ports(el));
smp_wmb(); /* complete the seqlock */
- WRITE_ONCE(execlists->active, execlists->inflight);
+ WRITE_ONCE(el->active, el->inflight);
/* XXX Magic delay for tgl */
ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
- WRITE_ONCE(execlists->pending[0], NULL);
+ WRITE_ONCE(el->pending[0], NULL);
} else {
- if (GEM_WARN_ON(!*execlists->active)) {
- execlists->error_interrupt |= ERROR_CSB;
+ if (GEM_WARN_ON(!*el->active)) {
+ el->error_interrupt |= ERROR_CSB;
break;
}
/* port0 completed, advanced to port1 */
- trace_ports(execlists, "completed", execlists->active);
+ trace_ports(el, "completed", el->active);
/*
* We rely on the hardware being strongly
@@ -1855,8 +1850,8 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
* itself...
*/
if (GEM_SHOW_DEBUG() &&
- !__i915_request_is_complete(*execlists->active)) {
- struct i915_request *rq = *execlists->active;
+ !__i915_request_is_complete(*el->active)) {
+ struct i915_request *rq = *el->active;
const u32 *regs __maybe_unused =
rq->context->lrc_reg_state;
@@ -1883,10 +1878,10 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
regs[CTX_RING_TAIL]);
}
- *inactive++ = *execlists->active++;
+ *inactive++ = *el->active++;
- GEM_BUG_ON(execlists->active - execlists->inflight >
- execlists_num_ports(execlists));
+ GEM_BUG_ON(el->active - el->inflight >
+ execlists_num_ports(el));
}
} while (head != tail);
@@ -1908,8 +1903,8 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
* and merits a fresh timeslice. We reinstall the timer after
* inspecting the queue to see if we need to resubmit.
*/
- if (*prev != *execlists->active) /* elide lite-restores */
- new_timeslice(execlists);
+ if (*prev != *el->active) /* elide lite-restores */
+ new_timeslice(el);
return inactive;
}
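For orientation, the CSB is consumed as a classic ring buffer: a cached
head chased towards the hardware's write pointer, wrapping at csb_size.
Stripped to its skeleton (a sketch; the real loop above additionally
orders the reads against the hardware writes, special-cases reset and
validates each transition):

	head = el->csb_head;
	tail = READ_ONCE(*el->csb_write);
	while (head != tail) {
		if (++head == num_entries)
			head = 0;	/* wrap the ring */
		csb = buf[head];
		/* promote pending[] -> inflight[], or retire *active */
	}
	el->csb_head = head;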
@@ -2174,10 +2169,10 @@ static void execlists_submission_tasklet(unsigned long data)
rcu_read_unlock();
}
-static void __execlists_kick(struct intel_engine_execlists *execlists)
+static void __execlists_kick(struct intel_engine_execlists *el)
{
struct intel_engine_cs *engine =
- container_of(execlists, typeof(*engine), execlists);
+ container_of(el, typeof(*engine), execlists);
i915_sched_kick(&engine->active);
}
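Both the timeslice and preemption timers funnel into this kick via
small from-timer trampolines; in the upstream code of this era the
wiring is approximately (paraphrased sketch):

	#define execlists_kick(t, member) \
		__execlists_kick(container_of(t, struct intel_engine_execlists, member))

	static void execlists_timeslice(struct timer_list *timer)
	{
		execlists_kick(timer, timer);	/* el->timer expired */
	}

	static void execlists_preempt(struct timer_list *timer)
	{
		execlists_kick(timer, preempt);	/* el->preempt expired */
	}

These are the callbacks installed by the timer_setup() calls in
intel_execlists_submission_setup() below, and the timers cancelled in
cancel_port_requests() above.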
@@ -2324,8 +2319,8 @@ static int execlists_request_alloc(struct i915_request *request)
static void reset_csb_pointers(struct intel_engine_cs *engine)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- const unsigned int reset_value = execlists->csb_size - 1;
+ struct intel_engine_execlists * const el = &engine->execlists;
+ const unsigned int reset_value = el->csb_size - 1;
ring_set_paused(engine, 0);
@@ -2346,21 +2341,21 @@ static void reset_csb_pointers(struct intel_engine_cs *engine)
* inline comparison of our cached head position against the last HW
* write works even before the first interrupt.
*/
- execlists->csb_head = reset_value;
- WRITE_ONCE(*execlists->csb_write, reset_value);
+ el->csb_head = reset_value;
+ WRITE_ONCE(*el->csb_write, reset_value);
wmb(); /* Make sure this is visible to HW (paranoia?) */
/* Check that the GPU does indeed update the CSB entries! */
- memset(execlists->csb_status, -1, (reset_value + 1) * sizeof(u64));
- invalidate_csb_entries(&execlists->csb_status[0],
- &execlists->csb_status[reset_value]);
+ memset(el->csb_status, -1, (reset_value + 1) * sizeof(u64));
+ invalidate_csb_entries(&el->csb_status[0],
+ &el->csb_status[reset_value]);
/* Once more for luck and our trusty paranoia */
ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
0xffff << 16 | reset_value << 8 | reset_value);
ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
- GEM_BUG_ON(READ_ONCE(*execlists->csb_write) != reset_value);
+ GEM_BUG_ON(READ_ONCE(*el->csb_write) != reset_value);
}
static void sanitize_hwsp(struct intel_engine_cs *engine)
@@ -2535,10 +2530,10 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
static struct i915_request **
reset_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct intel_engine_execlists * const el = &engine->execlists;
mb(); /* paranoia: read the CSB pointers from after the reset */
- clflush(execlists->csb_write);
+ clflush(el->csb_write);
mb();
inactive = process_csb(engine, inactive); /* drain preemption events */
@@ -2629,7 +2624,7 @@ execlists_reset_active(struct intel_engine_cs *engine, bool stalled)
static void execlists_reset_csb(struct intel_engine_cs *engine, bool stalled)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct intel_engine_execlists * const el = &engine->execlists;
struct i915_request *post[2 * EXECLIST_MAX_PORTS];
struct i915_request **inactive;
@@ -2638,7 +2633,7 @@ static void execlists_reset_csb(struct intel_engine_cs *engine, bool stalled)
execlists_reset_active(engine, true);
- inactive = cancel_port_requests(execlists, inactive);
+ inactive = cancel_port_requests(el, inactive);
post_process_csb(post, inactive);
rcu_read_unlock();
}
@@ -2666,7 +2661,7 @@ static void nop_submission_tasklet(unsigned long data)
static void execlists_reset_cancel(struct intel_engine_cs *engine)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct intel_engine_execlists * const el = &engine->execlists;
struct i915_request *rq, *rn;
struct i915_priolist *pl;
struct rb_node *rb;
@@ -2713,11 +2708,11 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
i915_request_mark_eio(rq);
/* Cancel all attached virtual engines */
- while ((rb = rb_first_cached(&execlists->virtual))) {
+ while ((rb = rb_first_cached(&el->virtual))) {
struct virtual_engine *ve =
rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
- rb_erase_cached(rb, &execlists->virtual);
+ rb_erase_cached(rb, &el->virtual);
RB_CLEAR_NODE(rb);
spin_lock(&ve->base.active.lock);
@@ -2914,15 +2909,15 @@ static void rcs_submission_override(struct intel_engine_cs *engine)
int intel_execlists_submission_setup(struct intel_engine_cs *engine)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct intel_engine_execlists * const el = &engine->execlists;
struct drm_i915_private *i915 = engine->i915;
struct intel_uncore *uncore = engine->uncore;
u32 base = engine->mmio_base;
tasklet_init(&engine->active.tasklet,
execlists_submission_tasklet, (unsigned long)engine);
- timer_setup(&engine->execlists.timer, execlists_timeslice, 0);
- timer_setup(&engine->execlists.preempt, execlists_preempt, 0);
+ timer_setup(&el->timer, execlists_timeslice, 0);
+ timer_setup(&el->preempt, execlists_preempt, 0);
logical_ring_default_vfuncs(engine);
logical_ring_default_irqs(engine);
@@ -2933,30 +2928,30 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
lrc_init_wa_ctx(engine);
if (HAS_LOGICAL_RING_ELSQ(i915)) {
- execlists->submit_reg = uncore->regs +
+ el->submit_reg = uncore->regs +
i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base));
- execlists->ctrl_reg = uncore->regs +
+ el->ctrl_reg = uncore->regs +
i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base));
} else {
- execlists->submit_reg = uncore->regs +
+ el->submit_reg = uncore->regs +
i915_mmio_reg_offset(RING_ELSP(base));
}
- execlists->csb_status =
+ el->csb_status =
(u64 *)&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
- execlists->csb_write =
+ el->csb_write =
&engine->status_page.addr[intel_hws_csb_write_index(i915)];
if (INTEL_GEN(i915) < 11)
- execlists->csb_size = GEN8_CSB_ENTRIES;
+ el->csb_size = GEN8_CSB_ENTRIES;
else
- execlists->csb_size = GEN11_CSB_ENTRIES;
+ el->csb_size = GEN11_CSB_ENTRIES;
engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0);
if (INTEL_GEN(engine->i915) >= 11) {
- execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
- execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
+ el->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
+ el->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
}
/* Finally, take ownership and responsibility for cleanup! */
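For concreteness (values taken from the headers of this era, stated
here as an assumption): GEN8_CSB_ENTRIES is 6 and GEN11_CSB_ENTRIES is
12, so the reset_value computed in reset_csb_pointers() above is 5 or
11 respectively, i.e. the cached head is parked on the last entry and
the first hardware status write lands back on entry 0.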
@@ -3471,7 +3466,7 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
int indent),
unsigned int max)
{
- const struct intel_engine_execlists *execlists = &engine->execlists;
+ const struct intel_engine_execlists *el = &engine->execlists;
struct i915_request *rq, *last;
struct i915_priolist *pl;
unsigned long flags;
@@ -3518,7 +3513,7 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
last = NULL;
count = 0;
- for (rb = rb_first_cached(&execlists->virtual); rb; rb = rb_next(rb)) {
+ for (rb = rb_first_cached(&el->virtual); rb; rb = rb_next(rb)) {
struct virtual_engine *ve =
rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
struct i915_request *rq;
--
2.20.1